Change in vdsm[master]: tests: JsonRpcServer tests suite
by Piotr Kliczewski
Piotr Kliczewski has uploaded a new change for review.
Change subject: tests: JsonRpcServer tests suite
......................................................................
tests: JsonRpcServer tests suite
Change-Id: I1090764c7289544abe331a13ec765ceed2a53afe
Signed-off-by: pkliczewski <piotr.kliczewski(a)gmail.com>
---
M lib/yajsonrpc/__init__.py
M tests/Makefile.am
A tests/jsonrpcServerTests.py
3 files changed, 133 insertions(+), 1 deletion(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/81/43581/1
diff --git a/lib/yajsonrpc/__init__.py b/lib/yajsonrpc/__init__.py
index 68af242..a307c7d 100644
--- a/lib/yajsonrpc/__init__.py
+++ b/lib/yajsonrpc/__init__.py
@@ -293,7 +293,12 @@
self._responses.append(response)
def requestDone(self, response):
- del self._requests[response.id]
+ try:
+ del self._requests[response.id]
+ except KeyError:
+ # ignore when request had no id
+ pass
+
self.addResponse(response)
self.sendReply()
diff --git a/tests/Makefile.am b/tests/Makefile.am
index 15dbd7b..9a7f81f 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -57,6 +57,7 @@
iproute2Tests.py \
ipwrapperTests.py \
iscsiTests.py \
+ jsonrpcServerTests.py \
libvirtconnectionTests.py \
lvmTests.py \
main.py \
diff --git a/tests/jsonrpcServerTests.py b/tests/jsonrpcServerTests.py
new file mode 100644
index 0000000..6c508c2
--- /dev/null
+++ b/tests/jsonrpcServerTests.py
@@ -0,0 +1,126 @@
+#
+# Copyright 2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Refer to the README and COPYING files for full details of the license
+#
+import threading
+from contextlib import contextmanager
+from Queue import Queue
+
+from testlib import VdsmTestCase as TestCaseBase
+from yajsonrpc import JsonRpcServer, JsonRpcResponse, JsonRpcRequest
+
+
+ID = 'e8a936a6-d886-4cfa-97b9-2d54209053ff'
+
+
+class FakeBridge(object):
+
+ def get_text(self):
+ return 'my text'
+
+ def register_server_address(self, server_address):
+ self.server_address = server_address
+
+ def unregister_server_address(self):
+ self.server_address = None
+
+
+class TestClient(object):
+
+ def __init__(self):
+ self._queue = Queue()
+
+ def send(self, data):
+ self._queue.put_nowait(data)
+
+ def pop_message(self):
+ return self._queue.get(True, 3)
+
+ @property
+ def has_outgoing_messages(self):
+ return (self._queue.qsize() > 0)
+
+
+class TestConnection(object):
+
+ def get_local_address(self):
+ return 'localhost'
+
+
+@contextmanager
+def create_server(bridge):
+ server = JsonRpcServer(bridge, 3600)
+ t = threading.Thread(target=server.serve_requests,
+ name='JsonRpcServer')
+ t.setDaemon(True)
+ t.start()
+
+ try:
+ yield server
+ finally:
+ server.stop()
+
+
+class JsonrpcServerTest(TestCaseBase):
+
+ def test_method_call(self):
+ request = JsonRpcRequest(method='get_text', params=(), reqId=ID)
+ client = TestClient()
+
+ with create_server(FakeBridge()) as server:
+ server.queueRequest((client, TestConnection(), request.encode()))
+
+ response = client.pop_message()
+ self.assertIsNot(response, None)
+ res = JsonRpcResponse.decode(response)
+ self.assertEquals(res.result, 'my text')
+
+ def test_not_existing_method(self):
+ request = JsonRpcRequest(method='abcd', params=(), reqId=ID)
+ client = TestClient()
+
+ with create_server(FakeBridge()) as server:
+ server.queueRequest((client, TestConnection(), request.encode()))
+
+ response = client.pop_message()
+ self.assertIsNot(response, None)
+ res = JsonRpcResponse.decode(response)
+ self.assertEquals(res.error['code'], -32601)
+
+ def test_wrong_param(self):
+ request = JsonRpcRequest(method='get_text', params=('param',),
+ reqId=ID)
+ client = TestClient()
+
+ with create_server(FakeBridge()) as server:
+ server.queueRequest((client, TestConnection(), request.encode()))
+
+ response = client.pop_message()
+ self.assertIsNot(response, None)
+ res = JsonRpcResponse.decode(response)
+ self.assertEquals(res.error['code'], -32603)
+
+ def test_no_request_id(self):
+ request = JsonRpcRequest(method='get_text', params=())
+ client = TestClient()
+
+ with create_server(FakeBridge()) as server:
+ server.queueRequest((client, TestConnection(), request.encode()))
+
+ response = client.pop_message()
+ self.assertIsNot(response, None)
--
To view, visit https://gerrit.ovirt.org/43581
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I1090764c7289544abe331a13ec765ceed2a53afe
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Piotr Kliczewski <piotr.kliczewski(a)gmail.com>
7 years, 2 months
Change in vdsm[master]: vm: Cleanup waiting for xml update
by Nir Soffer
Nir Soffer has uploaded a new change for review.
Change subject: vm: Cleanup waiting for xml update
......................................................................
vm: Cleanup waiting for xml update
This patch cleans up a bit the code for waiting until libvirt xml is
updated after pivot was completed.
- Clarify confusing log message claiming that pivot failed after it
completed successfully
- Cleanup creation of volumes lists using generator expression
- More clear logic for checking current volumes list
- Replace detailed log message and unhelpful exception with detailed
exception
- Move comment out of the loop to make the loop more clear
- Remove unneeded keys() calls when looking up alias in chains
This code was added as a temporary solution until libvirt is fixed, but I
think we would like to keep a simplified version of it even after libvirt
is fixed, verifying that the operation was successful.
Change-Id: I9fec5416a62736bad461ddd0b54093d23960b7a6
Signed-off-by: Nir Soffer <nsoffer(a)redhat.com>
---
M vdsm/virt/vm.py
1 file changed, 27 insertions(+), 24 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/38/39938/1
diff --git a/vdsm/virt/vm.py b/vdsm/virt/vm.py
index efadbdb..8ece47b 100644
--- a/vdsm/virt/vm.py
+++ b/vdsm/virt/vm.py
@@ -5100,40 +5100,43 @@
# synchronized and we may start the vm with a stale volume in the
# future. See https://bugzilla.redhat.com/show_bug.cgi?id=1202719 for
# more details.
- # TODO: Remove once we depend on a libvirt with this bug fixed.
# We expect libvirt to show that the original leaf has been removed
# from the active volume chain.
origVols = sorted([x['volumeID'] for x in self.drive.volumeChain])
- expectedVols = origVols[:]
- expectedVols.remove(self.drive.volumeID)
+ expectedVols = [v for v in origVols if v != self.driveVolumeID]
alias = self.drive['alias']
self.vm.log.info("Waiting for libvirt to update the XML after pivot "
"of drive %s completed", alias)
- while True:
- # This operation should complete in either one or two iterations of
- # this loop. Until libvirt updates the XML there is nothing to do
- # but wait. While we wait we continue to tell engine that the job
- # is ongoing. If we are still in this loop when the VM is powered
- # off, the merge will be resolved manually by engine using the
- # reconcileVolumeChain verb.
- chains = self.vm._driveGetActualVolumeChain([self.drive])
- if alias not in chains.keys():
- raise RuntimeError("Failed to retrieve volume chain for "
- "drive %s. Pivot failed.", alias)
- curVols = sorted([entry.uuid for entry in chains[alias]])
- if curVols == origVols:
- time.sleep(1)
- elif curVols == expectedVols:
+ # This operation should complete in either one or two iterations of
+ # this loop. Until libvirt updates the XML there is nothing to do
+ # but wait. While we wait we continue to tell engine that the job
+ # is ongoing. If we are still in this loop when the VM is powered
+ # off, the merge will be resolved manually by engine using the
+ # reconcileVolumeChain verb.
+ # TODO: Check once when we depend on a libvirt with this bug fixed.
+
+ while True:
+ chains = self.vm._driveGetActualVolumeChain([self.drive])
+ if alias not in chains:
+ raise RuntimeError("Failed to retrieve volume chain for "
+ "drive %s after pivot completed", alias)
+
+ curVols = sorted(entry.uuid for entry in chains[alias])
+
+ if curVols == expectedVols:
self.vm.log.info("The XML update has been completed")
- break
- else:
- self.log.error("Bad volume chain found for drive %s. Previous "
- "chain: %s, Expected chain: %s, Actual chain: "
- "%s", alias, origVols, expectedVols, curVols)
- raise RuntimeError("Bad volume chain found")
+ return
+
+ if curVols != origVols:
+ raise RuntimeError(
+ "Bad volume chain after pivot for drive %s. Previous "
+ "chain: %s, Expected chain: %s, Actual chain: %s" %
+ (alias, origVols, expectedVols, curVols))
+
+ time.sleep(1)
def _devicesWithAlias(domXML):
--
To view, visit https://gerrit.ovirt.org/39938
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I9fec5416a62736bad461ddd0b54093d23960b7a6
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Nir Soffer <nsoffer(a)redhat.com>
7 years, 2 months
Change in vdsm[master]: fc-scan: Use utilities from vdsm library.
by Nir Soffer
Nir Soffer has uploaded a new change for review.
Change subject: fc-scan: Use utilities from vdsm library.
......................................................................
fc-scan: Use utilities from vdsm library.
Replace low level threading code with simpler concurrent.tmap() call and
duplicate monotonic_time() with utils.monotonic_time().
Change-Id: Ic48748d6a43d41e034e16cb4f636ebe627881590
Signed-off-by: Nir Soffer <nsoffer(a)redhat.com>
---
M vdsm/storage/fc-scan
1 file changed, 24 insertions(+), 46 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/66/38466/1
diff --git a/vdsm/storage/fc-scan b/vdsm/storage/fc-scan
index 344345d..c746ea4 100755
--- a/vdsm/storage/fc-scan
+++ b/vdsm/storage/fc-scan
@@ -38,43 +38,11 @@
import logging
import os
import sys
-import threading
+
+from vdsm import concurrent
+from vdsm import utils
log = logging.getLogger("fc-scan")
-
-
-class Scan(object):
-
- def __init__(self, host):
- self.host = host
- self.succeeded = False
- self.thread = None
-
- def start(self):
- self.thread = threading.Thread(target=self.run)
- self.thread.daemon = True
- self.thread.start()
-
- def wait(self):
- self.thread.join()
-
- def run(self):
- try:
- path = "/sys/class/scsi_host/%s/scan" % self.host
- log.debug("Scanning %s", path)
- start = monotonic_time()
- fd = os.open(path, os.O_WRONLY)
- try:
- os.write(fd, "- - -")
- finally:
- os.close(fd)
- self.succeeded = True
- elapsed = monotonic_time() - start
- log.debug("Scanned %s in %.2f seconds", path, elapsed)
- except OSError as e:
- log.error("Scanning %s failed: %s", path, e)
- except Exception:
- log.exception("Scanning %s failed", path)
def main(args):
@@ -93,22 +61,32 @@
log.debug("No fc_host found")
return 0
- scans = []
-
- for host in hosts:
- s = Scan(host)
- s.start()
- scans.append(s)
-
- for s in scans:
- s.wait()
+ scans = concurrent.tmap(scan_host, hosts)
if not all(s.succeeded for s in scans):
return 1
+ return 0
-def monotonic_time():
- return os.times()[4]
+
+def scan_host(name):
+ try:
+ path = "/sys/class/scsi_host/%s/scan" % name
+ log.debug("Scanning %s", path)
+ start = utils.monotonic_time()
+ fd = os.open(path, os.O_WRONLY)
+ try:
+ os.write(fd, "- - -")
+ finally:
+ os.close(fd)
+ elapsed = utils.monotonic_time() - start
+ log.debug("Scanned %s in %.2f seconds", path, elapsed)
+ except OSError as e:
+ log.error("Scanning %s failed: %s", path, e)
+ raise
+ except Exception:
+ log.exception("Scanning %s failed", path)
+ raise
if __name__ == '__main__':
--
To view, visit https://gerrit.ovirt.org/38466
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ic48748d6a43d41e034e16cb4f636ebe627881590
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Nir Soffer <nsoffer(a)redhat.com>
7 years, 2 months
Change in vdsm[master]: lib: Revert and refine error handling in tmap()
by Nir Soffer
Nir Soffer has uploaded a new change for review.
Change subject: lib: Revert and refine error handling in tmap()
......................................................................
lib: Revert and refine error handling in tmap()
In commit 2b7155b696 (lib: Simplify and generalize concurrent.tmap()),
we simplified error handling by returning a named tuple with function
results. This turned out less useful than the original error handling.
This patch returns the previous error handling:
- Functions passed to tmap() should not raise - if they raise, this is
considered a bug in the function.
- The last error is raised by tmap() instead of returning the result.
This makes it easier to fail loudly for unexpected errors.
- The original exception is re-raised now with the original traceback.
- Error handling is documented properly now
Previously you had to make sure function raises to signal failures:
def func():
try:
code that should not fail...
code that may fail...
code that should not fail...
except ExpectedError:
log.error(...)
raise
except Exception:
log.exception(...)
raise
results = concurrent.tmap(func, values)
if not all(r.succeeded for r in results):
...
Returning the result as is lets us have nicer code:
def func():
code that should not fail...
try:
code that may fail...
except ExpectedError:
log.error(...)
return False
code that should not fail...
return True
succeeded = concurrent.tmap(func, values)
if not all(succeeded):
...
We can ignore unexpected errors, since tmap() will log them and fail
loudly. We can also minimize try except block for expected errors.
Change-Id: I0154b28ff7822c63e77181bbbf444c712bd0c31e
Signed-off-by: Nir Soffer <nsoffer(a)redhat.com>
---
M lib/vdsm/concurrent.py
M tests/concurrentTests.py
2 files changed, 45 insertions(+), 19 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/11/39211/1
diff --git a/lib/vdsm/concurrent.py b/lib/vdsm/concurrent.py
index 64e072d..5498052 100644
--- a/lib/vdsm/concurrent.py
+++ b/lib/vdsm/concurrent.py
@@ -18,22 +18,42 @@
# Refer to the README and COPYING files for full details of the license
#
+import logging
import threading
-from collections import namedtuple
-
-
-Result = namedtuple("Result", ["succeeded", "value"])
+import sys
def tmap(func, iterable):
+ """
+ Run func with arguments from iterable in multiple threads, returning the
+ output in the order of the arguments.
+
+ func should not raise exceptions - we consider this a bug in func, and will
+ fail the call and re-raise the exception in the caller thread.
+
+ Expected exceptions should be handled in func. If the caller likes to
+ handle the error later, func should return it:
+
+ def func(value):
+ try:
+ return something(value)
+ except ExpectedError as e:
+ return e
+
+ Unexpected exceptions should not be handled, as they are logged in the
+ worker threads and re-raised in the caller thread. If multiple exceptions
+ are raised, only the last one will be re-raised in the caller thread.
+ """
args = list(iterable)
results = [None] * len(args)
+ error = [None]
def worker(i, f, arg):
try:
- results[i] = Result(True, f(arg))
- except Exception as e:
- results[i] = Result(False, e)
+ results[i] = f(arg)
+ except Exception:
+ error[0] = sys.exc_info()
+ logging.exception("Unhandled exception in tmap worker thread")
threads = []
for i, arg in enumerate(args):
@@ -45,4 +65,8 @@
for t in threads:
t.join()
+ if error[0] is not None:
+ t, v, tb = error[0]
+ raise t, v, tb
+
return results
diff --git a/tests/concurrentTests.py b/tests/concurrentTests.py
index 307e397..5c0646b 100644
--- a/tests/concurrentTests.py
+++ b/tests/concurrentTests.py
@@ -26,13 +26,16 @@
from vdsm import concurrent
+class Error(Exception):
+ pass
+
+
class TMapTests(VdsmTestCase):
def test_results(self):
values = tuple(range(10))
results = concurrent.tmap(lambda x: x, values)
- expected = [concurrent.Result(True, x) for x in values]
- self.assertEqual(results, expected)
+ self.assertEqual(results, list(values))
def test_results_order(self):
def func(x):
@@ -40,8 +43,7 @@
return x
values = tuple(random.random() * 0.1 for x in range(10))
results = concurrent.tmap(func, values)
- expected = [concurrent.Result(True, x) for x in values]
- self.assertEqual(results, expected)
+ self.assertEqual(results, list(values))
def test_concurrency(self):
start = time.time()
@@ -49,12 +51,12 @@
elapsed = time.time() - start
self.assertTrue(0.1 < elapsed < 0.2)
- def test_error(self):
- error = RuntimeError("No result for you!")
-
+ def test_raise_last_error(self):
def func(x):
- raise error
-
- results = concurrent.tmap(func, range(10))
- expected = [concurrent.Result(False, error)] * 10
- self.assertEqual(results, expected)
+ raise Error(x)
+ try:
+ concurrent.tmap(func, (1, 2, 3))
+ except Error as e:
+ self.assertEqual(e.args, (3,))
+ else:
+ self.fail("Exception was not raised")
--
To view, visit https://gerrit.ovirt.org/39211
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I0154b28ff7822c63e77181bbbf444c712bd0c31e
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Nir Soffer <nsoffer(a)redhat.com>
7 years, 2 months
Change in vdsm[master]: virt: Use Drive.diskType instead of networkDev and blockDev
by Nir Soffer
Nir Soffer has uploaded a new change for review.
Change subject: virt: Use Drive.diskType instead of networkDev and blockDev
......................................................................
virt: Use Drive.diskType instead of networkDev and blockDev
Now that we have explicit diskType we don't need to use the networkDev
and blockDev properties. This is very useful when we set libvirt disk
type property, or want to check for certain disk type.
Change-Id: Id68bc74b3d788dc82fc61bf8c3de5a52164d0989
Signed-off-by: Nir Soffer <nsoffer(a)redhat.com>
---
M tests/vmStorageTests.py
M vdsm/virt/vm.py
2 files changed, 21 insertions(+), 28 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/72/40472/1
diff --git a/tests/vmStorageTests.py b/tests/vmStorageTests.py
index 8e8f7bc..dfe0991 100644
--- a/tests/vmStorageTests.py
+++ b/tests/vmStorageTests.py
@@ -268,74 +268,69 @@
def test_cdrom(self):
conf = drive_config(device='cdrom')
drive = Drive({}, self.log, **conf)
- self.assertFalse(drive.networkDev)
- self.assertFalse(drive.blockDev)
+ self.assertEqual(drive.diskType, DISK_TYPE.FILE)
def test_floppy(self):
conf = drive_config(device='floppy')
drive = Drive({}, self.log, **conf)
- self.assertFalse(drive.networkDev)
- self.assertFalse(drive.blockDev)
+ self.assertEqual(drive.diskType, DISK_TYPE.FILE)
def test_network_disk(self):
conf = drive_config(diskType=DISK_TYPE.NETWORK)
drive = Drive({}, self.log, **conf)
- self.assertTrue(drive.networkDev)
- self.assertFalse(drive.blockDev)
+ self.assertEqual(drive.diskType, DISK_TYPE.NETWORK)
@MonkeyPatch(utils, 'isBlockDevice', lambda path: True)
def test_block_disk(self):
conf = drive_config(device='disk')
drive = Drive({}, self.log, **conf)
- self.assertFalse(drive.networkDev)
- self.assertTrue(drive.blockDev)
+ self.assertEqual(drive.diskType, DISK_TYPE.BLOCK)
@MonkeyPatch(utils, 'isBlockDevice', lambda path: False)
def test_file_disk(self):
conf = drive_config(device='disk')
drive = Drive({}, self.log, **conf)
- self.assertFalse(drive.networkDev)
- self.assertFalse(drive.blockDev)
+ self.assertEqual(drive.diskType, DISK_TYPE.FILE)
@MonkeyPatch(utils, 'isBlockDevice', lambda path: False)
def test_migrate_from_file_to_block(self):
conf = drive_config(path='/filedomain/volume')
drive = Drive({}, self.log, **conf)
- self.assertFalse(drive.blockDev)
+ self.assertEqual(drive.diskType, DISK_TYPE.FILE)
# Migrate drive to block domain...
utils.isBlockDevice = lambda path: True
drive.path = "/blockdomain/volume"
- self.assertTrue(drive.blockDev)
+ self.assertEqual(drive.diskType, DISK_TYPE.BLOCK)
@MonkeyPatch(utils, 'isBlockDevice', lambda path: True)
def test_migrate_from_block_to_file(self):
conf = drive_config(path='/blockdomain/volume')
drive = Drive({}, self.log, **conf)
- self.assertTrue(drive.blockDev)
+ self.assertEqual(drive.diskType, DISK_TYPE.BLOCK)
# Migrate drive to file domain...
utils.isBlockDevice = lambda path: False
drive.path = "/filedomain/volume"
- self.assertFalse(drive.blockDev)
+ self.assertEqual(drive.diskType, DISK_TYPE.FILE)
@MonkeyPatch(utils, 'isBlockDevice', lambda path: True)
def test_migrate_from_block_to_network(self):
conf = drive_config(path='/blockdomain/volume')
drive = Drive({}, self.log, **conf)
- self.assertTrue(drive.blockDev)
+ self.assertEqual(drive.diskType, DISK_TYPE.BLOCK)
# Migrate drive to network disk...
drive.path = "pool/volume"
drive.diskType = DISK_TYPE.NETWORK
- self.assertFalse(drive.blockDev)
+ self.assertEqual(drive.diskType, DISK_TYPE.NETWORK)
@MonkeyPatch(utils, 'isBlockDevice', lambda path: True)
def test_migrate_network_to_block(self):
conf = drive_config(diskType=DISK_TYPE.NETWORK, path='pool/volume')
drive = Drive({}, self.log, **conf)
- self.assertTrue(drive.networkDev)
+ self.assertEqual(drive.diskType, DISK_TYPE.NETWORK)
# Migrate drive to block domain...
drive.path = '/blockdomain/volume'
drive.diskType = None
- self.assertTrue(drive.blockDev)
+ self.assertEqual(drive.diskType, DISK_TYPE.BLOCK)
@expandPermutations
diff --git a/vdsm/virt/vm.py b/vdsm/virt/vm.py
index 78b55c9..a6e742e 100644
--- a/vdsm/virt/vm.py
+++ b/vdsm/virt/vm.py
@@ -1862,7 +1862,7 @@
def _changeDisk(self, diskDeviceXmlElement):
diskType = diskDeviceXmlElement.getAttribute('type')
- if diskType not in ['file', 'block']:
+ if diskType not in (DISK_TYPE.BLOCK, DISK_TYPE.FILE):
return
diskSerial = diskDeviceXmlElement. \
@@ -1871,13 +1871,12 @@
for vmDrive in self._devices[hwclass.DISK]:
if vmDrive.serial == diskSerial:
# update the type
- diskDeviceXmlElement.setAttribute(
- 'type', 'block' if vmDrive.blockDev else 'file')
+ diskDeviceXmlElement.setAttribute('type', vmDrive.diskType)
# update the path
+ attr = 'dev' if vmDrive.diskType == DISK_TYPE.BLOCK else 'file'
diskDeviceXmlElement.getElementsByTagName('source')[0]. \
- setAttribute('dev' if vmDrive.blockDev else 'file',
- vmDrive.path)
+ setAttribute(attr, vmDrive.path)
# update the format (the disk might have been collapsed)
diskDeviceXmlElement.getElementsByTagName('driver')[0]. \
@@ -2773,7 +2772,7 @@
# we specify type='block' and dev=path for block volumes but we
# always specify the file=path for backwards compatibility.
args = {'type': sourceType, 'file': newPath}
- if sourceType == 'block':
+ if sourceType == DISK_TYPE.BLOCK:
args['dev'] = newPath
disk.appendChildWithArgs('source', **args)
return disk
@@ -2881,7 +2880,7 @@
newDrives[vmDevName]["format"] = "cow"
# We need to keep track of the drive object because we cannot
- # safely access the blockDev property until after prepareVolumePath
+ # safely access the diskType property until after prepareVolumePath
vmDrives[vmDevName] = vmDrive
# If all the drives are the current ones, return success
@@ -2905,9 +2904,8 @@
_rollbackDrives(preparedDrives)
return errCode['snapshotErr']
- snapType = 'block' if vmDrives[vmDevName].blockDev else 'file'
snapelem = _diskSnapshot(vmDevName, newDrives[vmDevName]["path"],
- snapType)
+ vmDrives[vmDevName].diskType)
disks.appendChild(snapelem)
snap.appendChild(disks)
@@ -4553,7 +4551,7 @@
sourceXML = find_element_by_name(diskXML, 'source')
if not sourceXML:
break
- sourceAttr = ('file', 'dev')[drive.blockDev]
+ sourceAttr = 'dev' if drive.diskType == DISK_TYPE.BLOCK else 'file'
path = sourceXML.getAttribute(sourceAttr)
# TODO: Allocation information is not available in the XML. Switch
--
To view, visit https://gerrit.ovirt.org/40472
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Id68bc74b3d788dc82fc61bf8c3de5a52164d0989
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Nir Soffer <nsoffer(a)redhat.com>
7 years, 2 months
Change in vdsm[master]: misc: Safer and simpler itmap
by Nir Soffer
Nir Soffer has uploaded a new change for review.
Change subject: misc: Safer and simpler itmap
......................................................................
misc: Safer and simpler itmap
The previous code had few issues:
- It used unlimited number of threads by default. This may lead to
creation of 100's of threads if you do not specify a value.
- It used non-daemon threads, which could lead to unwanted delay during
vdsm shutdown.
- It tried to yield results before all arguments were handled. This
could lead to unwanted delay in argument processing, if the caller
would block processing the results.
- It started one thread per value, even if maxthreads was smaller than
number of values.
- It was too complicated.
Changes:
- The caller must specify the maximum number of threads.
- Use daemon threads
- Queue all values before yielding results
- Start up to maxthreads worker threads, each processing multiple values
- Simplify the code
- Add test for error handling
Change-Id: Iba6116ac4003702c8e921cebaf494491a6f9afaf
Signed-off-by: Nir Soffer <nsoffer(a)redhat.com>
---
M tests/miscTests.py
M vdsm/storage/misc.py
2 files changed, 42 insertions(+), 42 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/19/39119/1
diff --git a/tests/miscTests.py b/tests/miscTests.py
index 31f64fa..4b3e3c3 100644
--- a/tests/miscTests.py
+++ b/tests/miscTests.py
@@ -196,7 +196,7 @@
# outOfProcess operation + 1. it let us know that oop and itmap operate
# properly with their limitations
data = frozenset(range(oop.HELPERS_PER_DOMAIN + 1))
- ret = frozenset(misc.itmap(dummy, data, misc.UNLIMITED_THREADS))
+ ret = frozenset(misc.itmap(dummy, data, len(data)))
self.assertEquals(ret, data)
def testMoreThreadsThanArgs(self):
@@ -207,6 +207,13 @@
data = 1
self.assertRaises(ValueError, misc.itmap(int, data, 0).next)
+ def testErrors(self):
+ err = Exception()
+ def dummy(arg):
+ raise err
+ data = [1, 2, 3]
+ self.assertEqual(list(misc.itmap(dummy, data, 4)), [err] * len(data))
+
class RotateFiles(TestCaseBase):
diff --git a/vdsm/storage/misc.py b/vdsm/storage/misc.py
index eb484c7..463fd04 100644
--- a/vdsm/storage/misc.py
+++ b/vdsm/storage/misc.py
@@ -58,7 +58,6 @@
STR_UUID_SIZE = 36
UUID_HYPHENS = [8, 13, 18, 23]
MEGA = 1 << 20
-UNLIMITED_THREADS = -1
log = logging.getLogger('Storage.Misc')
@@ -882,53 +881,47 @@
raise exception
-def itmap(func, iterable, maxthreads=UNLIMITED_THREADS):
+def itmap(func, iterable, maxthreads):
"""
- Make an iterator that computes the function using
- arguments from the iterable. It works similar to tmap
- by running each operation in a different thread, this
- causes the results not to return in any particular
- order so it's good if you don't care about the order
- of the results.
- maxthreads stands for maximum threads that we can initiate simultaneosly.
- If we reached to max threads the function waits for thread to
- finish before initiate the next one.
+ Return an iterator calling func with arguments from iterable in multiple threads.
+
+ Unlike tmap, the results are not returned in the original order of the
+ arguments, and number of threads is limited to maxthreads.
"""
- if maxthreads < 1 and maxthreads != UNLIMITED_THREADS:
- raise ValueError("Wrong input to function itmap: %s", maxthreads)
+ if maxthreads < 1:
+ raise ValueError("Invalid maxthreads value: %s" % maxthreads)
- respQueue = Queue.Queue()
+ DONE = object()
+ values = Queue.Queue()
+ results = Queue.Queue()
- def wrapper(value):
- try:
- respQueue.put(func(value))
- except Exception as e:
- respQueue.put(e)
+ def worker():
+ while True:
+ value = values.get()
+ if value is DONE:
+ return
+ try:
+ results.put(func(value))
+ except Exception as e:
+ results.put(e)
- threadsCount = 0
- for arg in iterable:
- if maxthreads != UNLIMITED_THREADS:
- if maxthreads == 0:
- # This not supposed to happened. If it does, it's a bug.
- # maxthreads should get to 0 only after threadsCount is
- # greater than 1
- if threadsCount < 1:
- raise RuntimeError("No thread initiated")
- else:
- yield respQueue.get()
- # if yield returns one thread stopped, so we can run
- # another thread in queue
- maxthreads += 1
- threadsCount -= 1
+ count = 0
+ threads = 0
- t = threading.Thread(target=wrapper, args=(arg,))
- t.start()
- threadsCount += 1
- maxthreads -= 1
+ for value in iterable:
+ values.put(value)
+ count += 1
+ if threads < maxthreads:
+ t = threading.Thread(target=worker)
+ t.daemon = True
+ t.start()
+ threads += 1
- # waiting for rest threads to end
- for i in xrange(threadsCount):
- yield respQueue.get()
+ for _ in range(threads):
+ values.put(DONE)
+
+ for _ in xrange(count):
+ yield results.get()
def isAscii(s):
--
To view, visit https://gerrit.ovirt.org/39119
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Iba6116ac4003702c8e921cebaf494491a6f9afaf
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Nir Soffer <nsoffer(a)redhat.com>
7 years, 2 months
Change in vdsm[master]: udevadm: More precise error handling
by Nir Soffer
Nir Soffer has uploaded a new change for review.
Change subject: udevadm: More precise error handling
......................................................................
udevadm: More precise error handling
udevadm provides a --timeout option, but there is no robust way to
detect a timeout in EL6, EL7, and Fedora 20. In Fedora 21 and upstream,
udevadm ignores the timeout option. This patch improves error handling
by using our own timeout.
udevadm.settle() raises now udevadm.Failure or udevadm.Timeout, and the
caller is responsible to handle the error.
In both multipath.rescan() and IscsiConnection.connect(), we warn about
timeout but do not handle other errors, so real errors in udevadm will
fail loudly.
Change-Id: Ia0a7380b1b181ec93399ea741122cfa2e98086fb
Relates-To: https://bugzilla.redhat.com/1209474
Signed-off-by: Nir Soffer <nsoffer(a)redhat.com>
---
A tests/udevadmTests.py
M vdsm/storage/multipath.py
M vdsm/storage/storageServer.py
M vdsm/storage/udevadm.py
4 files changed, 106 insertions(+), 21 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/40/39740/1
diff --git a/tests/udevadmTests.py b/tests/udevadmTests.py
new file mode 100644
index 0000000..90841b2
--- /dev/null
+++ b/tests/udevadmTests.py
@@ -0,0 +1,52 @@
+#
+# Copyright 2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Refer to the README and COPYING files for full details of the license
+#
+
+
+from monkeypatch import MonkeyPatch
+from testlib import VdsmTestCase
+
+from vdsm import utils
+from storage import udevadm
+
+TRUE = utils.CommandPath("true", "/bin/true", "/usr/bin/true")
+FALSE = utils.CommandPath("false", "/bin/false", "/usr/bin/false")
+READ = utils.CommandPath("read", "/bin/read", "/usr/bin/read")
+
+
+class UdevadmSettleTests(VdsmTestCase):
+
+ @MonkeyPatch(udevadm, "_UDEVADM", TRUE)
+ def test_success(self):
+ udevadm.settle(5)
+
+ @MonkeyPatch(udevadm, "_UDEVADM", FALSE)
+ def test_error(self):
+ try:
+ udevadm.settle(5)
+ except udevadm.Failure as e:
+ self.assertEqual(e.rc, 1)
+ self.assertEqual(e.out, "")
+ self.assertEqual(e.err, "")
+ else:
+ self.fail("Failure not raised")
+
+ @MonkeyPatch(udevadm, "_UDEVADM", READ)
+ def test_timeout(self):
+ self.assertRaises(udevadm.Timeout, udevadm.settle, 1)
diff --git a/vdsm/storage/multipath.py b/vdsm/storage/multipath.py
index a1c42b3..925c411 100644
--- a/vdsm/storage/multipath.py
+++ b/vdsm/storage/multipath.py
@@ -73,7 +73,10 @@
# events are processed, ensuring detection of new devices and creation or
# update of multipath devices.
timeout = config.getint('irs', 'scsi_settle_timeout')
- udevadm.settle(timeout)
+ try:
+ udevadm.settle(timeout)
+ except udevadm.Timeout as e:
+ log.warning("Timeout waiting for udev events: %s", e)
def deduceType(a, b):
diff --git a/vdsm/storage/storageServer.py b/vdsm/storage/storageServer.py
index 22a90d1..c19fb8d 100644
--- a/vdsm/storage/storageServer.py
+++ b/vdsm/storage/storageServer.py
@@ -382,7 +382,10 @@
def connect(self):
iscsi.addIscsiNode(self._iface, self._target, self._cred)
timeout = config.getint("irs", "scsi_settle_timeout")
- udevadm.settle(timeout)
+ try:
+ udevadm.settle(timeout)
+ except udevadm.Timeout as e:
+ self.log.warning("Timeout waiting for udev events: %s", e)
def _match(self, session):
target = session.target
diff --git a/vdsm/storage/udevadm.py b/vdsm/storage/udevadm.py
index 4b4b54a..a2afd04 100644
--- a/vdsm/storage/udevadm.py
+++ b/vdsm/storage/udevadm.py
@@ -18,22 +18,39 @@
# Refer to the README and COPYING files for full details of the license
#
-import logging
+import errno
+import signal
+
from vdsm import utils
+from vdsm.infra import zombiereaper
_UDEVADM = utils.CommandPath("udevadm", "/sbin/udevadm", "/usr/sbin/udevadm")
class Error(Exception):
+ message = None
- def __init__(self, rc, out, err):
+ def __str__(self):
+ return self.message.format(self=self)
+
+
+class Failure(Error):
+ message = ("udevadm failed cmd={self.cmd} rc={self.rc} out={self.out!r} "
+ "err={self.err!r}")
+
+ def __init__(self, cmd, rc, out, err):
+ self.cmd = cmd
self.rc = rc
self.out = out
self.err = err
- def __str__(self):
- return "Process failed with rc=%d out=%r err=%r" % (
- self.rc, self.out, self.err)
+
+class Timeout(Error):
+ message = ("udevadm timed out cmd={self.cmd} timeout={self.timeout}")
+
+ def __init__(self, cmd, timeout):
+ self.cmd = cmd
+ self.timeout = timeout
def settle(timeout, exit_if_exists=None):
@@ -44,25 +61,35 @@
Arguments:
timeout Maximum number of seconds to wait for the event queue to
- become empty. A value of 0 will check if the queue is empty
- and always return immediately.
+ become empty.
exit_if_exists Stop waiting if file exists.
+
+ Raises Failure if udevadm failed, or Timeout if udevadm did not terminate
+ within the requested timeout.
"""
- args = ["settle", "--timeout=%s" % timeout]
+ cmd = [_UDEVADM.cmd, "settle"]
if exit_if_exists:
- args.append("--exit-if-exists=%s" % exit_if_exists)
+ cmd.append("--exit-if-exists=%s" % exit_if_exists)
- try:
- _run_command(args)
- except Error as e:
- logging.error("%s", e)
+ _run_command(cmd, timeout)
-def _run_command(args):
- cmd = [_UDEVADM.cmd]
- cmd.extend(args)
- rc, out, err = utils.execCmd(cmd, raw=True)
- if rc != 0:
- raise Error(rc, out, err)
+def _run_command(cmd, timeout=None):
+ proc = utils.execCmd(cmd, sync=False, deathSignal=signal.SIGKILL)
+
+ if not proc.wait(timeout):
+ try:
+ proc.kill()
+ except OSError as e:
+ if e.errno != errno.ESRCH:
+ raise
+ finally:
+ zombiereaper.autoReapPID(proc.pid)
+ raise Timeout(cmd, timeout)
+
+ if proc.returncode != 0:
+ out = "".join(proc.stdout)
+ err = "".join(proc.stderr)
+ raise Failure(cmd, proc.returncode, out, err)
--
To view, visit https://gerrit.ovirt.org/39740
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ia0a7380b1b181ec93399ea741122cfa2e98086fb
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Nir Soffer <nsoffer(a)redhat.com>
7 years, 2 months
Change in vdsm[master]: FileVolumeMetadata: split getMetadata
by alitke@redhat.com
Adam Litke has uploaded a new change for review.
Change subject: FileVolumeMetadata: split getMetadata
......................................................................
FileVolumeMetadata: split getMetadata
Change-Id: I1d3fb61831de5b50a3e562b80bf38ef15ede254f
Signed-off-by: Adam Litke <alitke(a)redhat.com>
---
M vdsm/storage/fileVolume.py
1 file changed, 16 insertions(+), 13 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/69/44569/1
diff --git a/vdsm/storage/fileVolume.py b/vdsm/storage/fileVolume.py
index 08e3f50..74aa911 100644
--- a/vdsm/storage/fileVolume.py
+++ b/vdsm/storage/fileVolume.py
@@ -123,18 +123,10 @@
"""
return (self.getVolumePath(),)
- def getMetadata(self, metaId=None):
- """
- Get Meta data array of key,values lines
- """
- if not metaId:
- metaId = self.getMetadataId()
-
- volPath, = metaId
- metaPath = self._getMetaVolumePath(volPath)
-
+ @classmethod
+ def read_metadata(cls, oop, meta_path):
try:
- f = self.oop.directReadLines(metaPath)
+ f = oop.directReadLines(meta_path)
out = {}
for l in f:
if l.startswith("EOF"):
@@ -145,11 +137,22 @@
out[key.strip()] = value.strip()
except Exception as e:
- self.log.error(e, exc_info=True)
- raise se.VolumeMetadataReadError("%s: %s" % (metaId, e))
+ cls.log.error(e, exc_info=True)
+ raise se.VolumeMetadataReadError("%s: %s" % (meta_path, e))
return out
+ def getMetadata(self, metaId=None):
+ """
+ Get Meta data array of key,values lines
+ """
+ if not metaId:
+ metaId = self.getMetadataId()
+
+ volPath, = metaId
+ metaPath = self._getMetaVolumePath(volPath)
+ return self.read_metadata(self.oop, metaPath)
+
def getParentId(self):
"""
Return parent volume UUID
--
To view, visit https://gerrit.ovirt.org/44569
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I1d3fb61831de5b50a3e562b80bf38ef15ede254f
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Adam Litke <alitke(a)redhat.com>
7 years, 2 months
Change in vdsm[master]: VolumeMetadata: move prepare and teardown
by alitke@redhat.com
Adam Litke has uploaded a new change for review.
Change subject: VolumeMetadata: move prepare and teardown
......................................................................
VolumeMetadata: move prepare and teardown
Change-Id: Iba0954ace3b1da5ea9afc41aeb6ea69a729fe29c
Signed-off-by: Adam Litke <alitke(a)redhat.com>
---
M vdsm/storage/blockVolume.py
M vdsm/storage/fileVolume.py
M vdsm/storage/volume.py
3 files changed, 117 insertions(+), 109 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/50/44050/1
diff --git a/vdsm/storage/blockVolume.py b/vdsm/storage/blockVolume.py
index 0168d64..597e7ee 100644
--- a/vdsm/storage/blockVolume.py
+++ b/vdsm/storage/blockVolume.py
@@ -74,6 +74,8 @@
volume.VolumeMetadata.__init__(self, repoPath, sdUUID, imgUUID,
volUUID)
self.metaoff = None
+ self.lvmActivationNamespace = sd.getNamespace(self.sdUUID,
+ LVM_ACTIVATION_NAMESPACE)
def getMetadataId(self):
"""
@@ -332,6 +334,49 @@
lvs = lvm.lvsByTag(sdUUID, "%s%s" % (TAG_PREFIX_IMAGE, imgUUID))
return [lv.name for lv in lvs]
+ @logskip("ResourceManager")
+ def llPrepare(self, rw=False, setrw=False):
+ """
+ Perform low level volume use preparation
+
+ For the Block Volumes the actual LV activation is wrapped
+ into lvmActivation resource. It is being initialized by the
+ storage domain sitting on top of the encapsulating VG.
+ We just use it here.
+ """
+ if setrw:
+ self.setrw(rw=rw)
+ access = rm.LockType.exclusive if rw else rm.LockType.shared
+ activation = rmanager.acquireResource(self.lvmActivationNamespace,
+ self.volUUID, access)
+ activation.autoRelease = False
+
+ @classmethod
+ def teardown(cls, sdUUID, volUUID, justme=False):
+ """
+ Deactivate volume and release resources.
+ Volume deactivation occurs as part of resource releasing.
+ If justme is false, the entire COW chain should be torn down.
+ """
+ cls.log.info("Tearing down volume %s/%s justme %s"
+ % (sdUUID, volUUID, justme))
+ lvmActivationNamespace = sd.getNamespace(sdUUID,
+ LVM_ACTIVATION_NAMESPACE)
+ rmanager.releaseResource(lvmActivationNamespace, volUUID)
+ if not justme:
+ try:
+ pvolUUID = _getVolumeTag(sdUUID, volUUID, TAG_PREFIX_PARENT)
+ except Exception as e:
+ # If storage not accessible or lvm error occurred
+ # we will fail to get the parent volume.
+ # We can live with it and still succeed in volume's teardown.
+ pvolUUID = volume.BLANK_UUID
+ cls.log.warn("Failure to get parent of volume %s/%s (%s)"
+ % (sdUUID, volUUID, e))
+
+ if pvolUUID != volume.BLANK_UUID:
+ cls.teardown(sdUUID=sdUUID, volUUID=pvolUUID, justme=False)
+
class BlockVolume(volume.Volume):
""" Actually represents a single volume (i.e. part of virtual disk).
@@ -341,8 +386,6 @@
def __init__(self, repoPath, sdUUID, imgUUID, volUUID):
md = self.MetadataClass(repoPath, sdUUID, imgUUID, volUUID)
volume.Volume.__init__(self, md)
- self.lvmActivationNamespace = sd.getNamespace(self.sdUUID,
- LVM_ACTIVATION_NAMESPACE)
@property
def metaoff(self):
@@ -609,49 +652,6 @@
def shareVolumeRollback(cls, taskObj, volPath):
cls.log.info("Volume rollback for volPath=%s", volPath)
utils.rmFile(volPath)
-
- @logskip("ResourceManager")
- def llPrepare(self, rw=False, setrw=False):
- """
- Perform low level volume use preparation
-
- For the Block Volumes the actual LV activation is wrapped
- into lvmActivation resource. It is being initialized by the
- storage domain sitting on top of the encapsulating VG.
- We just use it here.
- """
- if setrw:
- self.setrw(rw=rw)
- access = rm.LockType.exclusive if rw else rm.LockType.shared
- activation = rmanager.acquireResource(self.lvmActivationNamespace,
- self.volUUID, access)
- activation.autoRelease = False
-
- @classmethod
- def teardown(cls, sdUUID, volUUID, justme=False):
- """
- Deactivate volume and release resources.
- Volume deactivation occurs as part of resource releasing.
- If justme is false, the entire COW chain should be torn down.
- """
- cls.log.info("Tearing down volume %s/%s justme %s"
- % (sdUUID, volUUID, justme))
- lvmActivationNamespace = sd.getNamespace(sdUUID,
- LVM_ACTIVATION_NAMESPACE)
- rmanager.releaseResource(lvmActivationNamespace, volUUID)
- if not justme:
- try:
- pvolUUID = _getVolumeTag(sdUUID, volUUID, TAG_PREFIX_PARENT)
- except Exception as e:
- # If storage not accessible or lvm error occurred
- # we will failure to get the parent volume.
- # We can live with it and still succeed in volume's teardown.
- pvolUUID = volume.BLANK_UUID
- cls.log.warn("Failure to get parent of volume %s/%s (%s)"
- % (sdUUID, volUUID, e))
-
- if pvolUUID != volume.BLANK_UUID:
- cls.teardown(sdUUID=sdUUID, volUUID=pvolUUID, justme=False)
def getVolumeTag(self, tagPrefix):
return self.md.getVolumeTag(tagPrefix)
diff --git a/vdsm/storage/fileVolume.py b/vdsm/storage/fileVolume.py
index 2dad91f..08e3f50 100644
--- a/vdsm/storage/fileVolume.py
+++ b/vdsm/storage/fileVolume.py
@@ -337,6 +337,26 @@
volList.append(volid)
return volList
+ def llPrepare(self, rw=False, setrw=False):
+ """
+ Make volume accessible as readonly (internal) or readwrite (leaf)
+ """
+ volPath = self.getVolumePath()
+
+ # Volume leaves created in 2.2 did not have the group-writable bit
+ # set. We have to set it here if we want qemu-kvm to write to old
+ # NFS volumes.
+ self.oop.fileUtils.copyUserModeToGroup(volPath)
+
+ if setrw:
+ self.setrw(rw=rw)
+ if rw:
+ if not self.oop.os.access(volPath, os.R_OK | os.W_OK):
+ raise se.VolumeAccessError(volPath)
+ else:
+ if not self.oop.os.access(volPath, os.R_OK):
+ raise se.VolumeAccessError(volPath)
+
class FileVolume(volume.Volume):
""" Actually represents a single volume (i.e. part of virtual disk).
@@ -488,26 +508,6 @@
procPool.utils.rmFile(volPath)
procPool.utils.rmFile(cls.__metaVolumePath(volPath))
procPool.utils.rmFile(cls.MetadataClass._leaseVolumePath(volPath))
-
- def llPrepare(self, rw=False, setrw=False):
- """
- Make volume accessible as readonly (internal) or readwrite (leaf)
- """
- volPath = self.getVolumePath()
-
- # Volumes leaves created in 2.2 did not have group writeable bit
- # set. We have to set it here if we want qemu-kvm to write to old
- # NFS volumes.
- self.oop.fileUtils.copyUserModeToGroup(volPath)
-
- if setrw:
- self.setrw(rw=rw)
- if rw:
- if not self.oop.os.access(volPath, os.R_OK | os.W_OK):
- raise se.VolumeAccessError(volPath)
- else:
- if not self.oop.os.access(volPath, os.R_OK):
- raise se.VolumeAccessError(volPath)
@classmethod
def __putMetadata(cls, metaId, meta):
diff --git a/vdsm/storage/volume.py b/vdsm/storage/volume.py
index a098eb7..22f7ead 100644
--- a/vdsm/storage/volume.py
+++ b/vdsm/storage/volume.py
@@ -525,6 +525,56 @@
return apparentSize
return req_size
+ def prepare(self, rw=True, justme=False,
+ chainrw=False, setrw=False, force=False):
+ """
+ Prepare volume for use by consumer.
+ If justme is false, the entire COW chain is prepared.
+ Note: setrw arg may be used only by SPM flows.
+ """
+ self.log.info("Volume: preparing volume %s/%s",
+ self.sdUUID, self.volUUID)
+
+ if not force:
+ # Cannot prepare ILLEGAL volume
+ if not self.isLegal():
+ raise se.prepareIllegalVolumeError(self.volUUID)
+
+ if rw and self.isShared():
+ if chainrw:
+ rw = False # Shared cannot be set RW
+ else:
+ raise se.SharedVolumeNonWritable(self)
+
+ if (not chainrw and rw and self.isInternal() and setrw and
+ not self.recheckIfLeaf()):
+ raise se.InternalVolumeNonWritable(self)
+
+ self.llPrepare(rw=rw, setrw=setrw)
+ self.updateInvalidatedSize()
+
+ try:
+ if justme:
+ return True
+ pvol = self.produceParent()
+ if pvol:
+ pvol.prepare(rw=chainrw, justme=False,
+ chainrw=chainrw, setrw=setrw)
+ except Exception:
+ self.log.error("Unexpected error", exc_info=True)
+ self.teardown(self.sdUUID, self.volUUID)
+ raise
+
+ return True
+
+ @classmethod
+ def teardown(cls, sdUUID, volUUID, justme=False):
+ """
+ Teardown volume.
+ If justme is false, the entire COW chain is torn down.
+ """
+ pass
+
class Volume(object):
log = logging.getLogger('Storage.Volume')
@@ -1091,53 +1141,11 @@
def prepare(self, rw=True, justme=False,
chainrw=False, setrw=False, force=False):
- """
- Prepare volume for use by consumer.
- If justme is false, the entire COW chain is prepared.
- Note: setrw arg may be used only by SPM flows.
- """
- self.log.info("Volume: preparing volume %s/%s",
- self.sdUUID, self.volUUID)
-
- if not force:
- # Cannot prepare ILLEGAL volume
- if not self.isLegal():
- raise se.prepareIllegalVolumeError(self.volUUID)
-
- if rw and self.isShared():
- if chainrw:
- rw = False # Shared cannot be set RW
- else:
- raise se.SharedVolumeNonWritable(self)
-
- if (not chainrw and rw and self.isInternal() and setrw and
- not self.recheckIfLeaf()):
- raise se.InternalVolumeNonWritable(self)
-
- self.llPrepare(rw=rw, setrw=setrw)
- self.updateInvalidatedSize()
-
- try:
- if justme:
- return True
- pvol = self.produceParent()
- if pvol:
- pvol.prepare(rw=chainrw, justme=False,
- chainrw=chainrw, setrw=setrw)
- except Exception:
- self.log.error("Unexpected error", exc_info=True)
- self.teardown(self.sdUUID, self.volUUID)
- raise
-
- return True
+ return self.md.prepare(rw, justme, chainrw, setrw, force)
@classmethod
def teardown(cls, sdUUID, volUUID, justme=False):
- """
- Teardown volume.
- If justme is false, the entire COW chain is teared down.
- """
- pass
+ cls.MetadataClass.teardown(sdUUID, volUUID, justme)
@classmethod
def newMetadata(cls, metaId, sdUUID, imgUUID, puuid, size, format, type,
--
To view, visit https://gerrit.ovirt.org/44050
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Iba0954ace3b1da5ea9afc41aeb6ea69a729fe29c
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Adam Litke <alitke(a)redhat.com>
7 years, 2 months
Change in vdsm[master]: sdc: Allow StorageDomainCache to cache StorageDomainManifest...
by alitke@redhat.com
Adam Litke has uploaded a new change for review.
Change subject: sdc: Allow StorageDomainCache to cache StorageDomainManifest objects
......................................................................
sdc: Allow StorageDomainCache to cache StorageDomainManifest objects
When HSM is operating without SPM the system should always be using
StorageDomainManifest objects instead of StorageDomains. With this
patch we can instruct the cache to serve the correct type of object
without changing all consumers of sdCache.
Change-Id: I9a3dc7d9bf24f7d8b60ddff6f5364b65a9354e45
Signed-off-by: Adam Litke <alitke(a)redhat.com>
---
M vdsm/storage/sdc.py
1 file changed, 12 insertions(+), 3 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/41/44041/1
diff --git a/vdsm/storage/sdc.py b/vdsm/storage/sdc.py
index ecb9708..cbdaed9 100644
--- a/vdsm/storage/sdc.py
+++ b/vdsm/storage/sdc.py
@@ -64,12 +64,13 @@
STORAGE_STALE = 1
STORAGE_REFRESHING = 2
- def __init__(self, storage_repo):
+ def __init__(self, storage_repo, use_manifests=False):
self._syncroot = threading.Condition()
self.__domainCache = {}
self.__inProgress = set()
self.__staleStatus = self.STORAGE_STALE
self.storage_repo = storage_repo
+ self.use_manifests = use_manifests
self.knownSDs = {} # {sdUUID: mod.findDomain}
def invalidateStorage(self):
@@ -162,12 +163,18 @@
# this changes, please update the order.
for mod in (blockSD, glusterSD, localFsSD, nfsSD):
try:
- return mod.findDomain(sdUUID)
+ if self.use_manifests:
+ ret = mod.findDomainManifest(sdUUID)
+ else:
+ ret = mod.findDomain(sdUUID)
except se.StorageDomainDoesNotExist:
pass
except Exception:
self.log.error("Error while looking for domain `%s`", sdUUID,
exc_info=True)
+ else:
+ self.log.debug("Found domain %s", ret)
+ return ret
raise se.StorageDomainDoesNotExist(sdUUID)
@@ -181,10 +188,12 @@
return uuids
- def refresh(self):
+ def refresh(self, use_manifests=None):
with self._syncroot:
lvm.invalidateCache()
self.__domainCache.clear()
+ if use_manifests is not None:
+ self.use_manifests = use_manifests
def manuallyAddDomain(self, domain):
with self._syncroot:
--
To view, visit https://gerrit.ovirt.org/44041
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I9a3dc7d9bf24f7d8b60ddff6f5364b65a9354e45
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Adam Litke <alitke(a)redhat.com>
7 years, 2 months