Change in vdsm[master]: clusterlock: Add reference counting
by alitke@redhat.com
Adam Litke has uploaded a new change for review.
Change subject: clusterlock: Add reference counting
......................................................................
clusterlock: Add reference counting
The clusterLock can now be acquired by multiple threads of execution
since it is used by SDM verbs now. We need reference counting to ensure
that one thread does not release the clusterLock while another thread
still needs it.
Change-Id: I846116ae16e88a51bdce20f97ddf22859dea3086
Signed-off-by: Adam Litke <alitke(a)redhat.com>
---
M vdsm/storage/clusterlock.py
1 file changed, 55 insertions(+), 45 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/78/40378/1
diff --git a/vdsm/storage/clusterlock.py b/vdsm/storage/clusterlock.py
index 86d47ad..96bba2b 100644
--- a/vdsm/storage/clusterlock.py
+++ b/vdsm/storage/clusterlock.py
@@ -211,6 +211,7 @@
def __init__(self, sdUUID, idsPath, leasesPath, *args):
self._lock = threading.Lock()
+ self._clusterLockUsers = 0
self._sdUUID = sdUUID
self._idsPath = idsPath
self._leasesPath = leasesPath
@@ -302,16 +303,22 @@
# ClusterLock. We could consider to remove it in the future but keeping it
# for logging purpose is desirable.
def acquireClusterLock(self, hostId):
- self.log.info("Acquiring cluster lock for domain %s (id: %s)",
- self._sdUUID, hostId)
- self._acquire(SDM_LEASE_NAME, self.getLockDisk())
- self.log.debug("Cluster lock for domain %s successfully acquired "
- "(id: %s)", self._sdUUID, hostId)
+ with nested(self._lock, SANLock._sanlock_lock):
+ self.log.info("Acquiring cluster lock for domain %s (id: %s)",
+ self._sdUUID, hostId)
+ if self._clusterLockUsers == 0:
+ self._acquire(SDM_LEASE_NAME, self.getLockDisk())
+ self._clusterLockUsers = self._clusterLockUsers + 1
+ self.log.debug("Cluster lock for domain %s successfully acquired "
+ "(id: %s, users: %i)", self._sdUUID, hostId,
+ self._clusterLockUsers)
def acquireResource(self, resource, lockDisk, shared=False):
- self.log.info("Acquiring resource lock for %s", resource)
- self._acquire(resource, lockDisk, shared)
- self.log.debug("Resource lock for %s successfully acquired", resource)
+ with nested(self._lock, SANLock._sanlock_lock):
+ self.log.info("Acquiring resource lock for %s", resource)
+ self._acquire(resource, lockDisk, shared)
+ self.log.debug("Resource lock for %s successfully acquired",
+ resource)
def inquireClusterLock(self):
resource, owners = self._inquire(SDM_LEASE_NAME, self.getLockDisk())
@@ -330,43 +337,47 @@
[owner.get("host_id") for owner in owners])
def releaseClusterLock(self):
- self.log.info("Releasing cluster lock for domain %s", self._sdUUID)
- self._release(SDM_LEASE_NAME, self.getLockDisk())
- self.log.debug("Cluster lock for domain %s successfully released",
- self._sdUUID)
+ with self._lock:
+ self.log.info("Releasing cluster lock for domain %s", self._sdUUID)
+ if self._clusterLockUsers == 1:
+ self._release(SDM_LEASE_NAME, self.getLockDisk())
+ self._clusterLockUsers = self._clusterLockUsers - 1
+ self.log.debug("Cluster lock for domain %s successfully released "
+ "(users: %i)", self._sdUUID, self._clusterLockUsers)
def releaseResource(self, resource, lockDisk):
- self.log.info("Releasing resource lock for %s", resource)
- self._release(resource, lockDisk)
- self.log.debug("Resource lock for %s successfully released", resource)
+ with self._lock:
+ self.log.info("Releasing resource lock for %s", resource)
+ self._release(resource, lockDisk)
+ self.log.debug("Resource lock for %s successfully released",
+ resource)
def _acquire(self, resource, lockDisk, shared=False):
- with nested(self._lock, SANLock._sanlock_lock):
- self.log.info("Acquiring resource %s, shared=%s", resource, shared)
+ self.log.info("Acquiring resource %s, shared=%s", resource, shared)
- while True:
- if SANLock._sanlock_fd is None:
- try:
- SANLock._sanlock_fd = sanlock.register()
- except sanlock.SanlockException as e:
- raise se.AcquireLockFailure(
- self._sdUUID, e.errno,
- "Cannot register to sanlock", str(e))
-
+ while True:
+ if SANLock._sanlock_fd is None:
try:
- sanlock.acquire(self._sdUUID, resource, lockDisk,
- slkfd=SANLock._sanlock_fd, shared=shared)
+ SANLock._sanlock_fd = sanlock.register()
except sanlock.SanlockException as e:
- if e.errno != os.errno.EPIPE:
- raise se.AcquireLockFailure(
- self._sdUUID, e.errno,
- "Cannot acquire sanlock resource", str(e))
- SANLock._sanlock_fd = None
- continue
+ raise se.AcquireLockFailure(
+ self._sdUUID, e.errno, "Cannot register to sanlock",
+ str(e))
- break
+ try:
+ sanlock.acquire(self._sdUUID, resource, lockDisk,
+ slkfd=SANLock._sanlock_fd, shared=shared)
+ except sanlock.SanlockException as e:
+ if e.errno != os.errno.EPIPE:
+ raise se.AcquireLockFailure(
+ self._sdUUID, e.errno,
+ "Cannot acquire sanlock resource", str(e))
+ SANLock._sanlock_fd = None
+ continue
- self.log.debug("Resource %s successfully acquired", resource)
+ break
+
+ self.log.debug("Resource %s successfully acquired", resource)
def _inquire(self, resource, lockDisk):
res = sanlock.read_resource(*lockDisk[0])
@@ -374,17 +385,16 @@
return res, owners
def _release(self, resource, lockDisk):
- with self._lock:
- self.log.info("Releasing resource %s", resource)
+ self.log.info("Releasing resource %s", resource)
- try:
- sanlock.release(self._sdUUID, resource, lockDisk,
- slkfd=SANLock._sanlock_fd)
- except sanlock.SanlockException as e:
- raise se.ReleaseLockFailure(resource, e)
+ try:
+ sanlock.release(self._sdUUID, resource, lockDisk,
+ slkfd=SANLock._sanlock_fd)
+ except sanlock.SanlockException as e:
+ raise se.ReleaseLockFailure(resource, e)
- self._sanlockfd = None
- self.log.debug("Resource %s successfully released", resource)
+ self._sanlockfd = None
+ self.log.debug("Resource %s successfully released", resource)
class LocalLock(object):
--
To view, visit https://gerrit.ovirt.org/40378
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I846116ae16e88a51bdce20f97ddf22859dea3086
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Adam Litke <alitke(a)redhat.com>
6 years, 9 months
Change in vdsm[master]: XXX: Insert sleep when creating block SD
by alitke@redhat.com
Adam Litke has uploaded a new change for review.
Change subject: XXX: Insert sleep when creating block SD
......................................................................
XXX: Insert sleep when creating block SD
Change-Id: I10ed63d747f1353da824af8ec56ac8a6f66d666b
Signed-off-by: Adam Litke <alitke(a)redhat.com>
---
M vdsm/storage/blockSD.py
1 file changed, 4 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/53/43553/1
diff --git a/vdsm/storage/blockSD.py b/vdsm/storage/blockSD.py
index 0b8bdc7..0d5e403 100644
--- a/vdsm/storage/blockSD.py
+++ b/vdsm/storage/blockSD.py
@@ -896,6 +896,10 @@
# Create VMS file system
_createVMSfs(os.path.join("/dev", vgName, MASTERLV))
+ # XXX: When running an iSCSI server and initiator on the same host we
+ # need to wait a bit for IO to settle before deactivating LVs
+ import time
+ time.sleep(10)
lvm.deactivateLVs(vgName, MASTERLV)
path = lvm.lvPath(vgName, sd.METADATA)
--
To view, visit https://gerrit.ovirt.org/43553
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I10ed63d747f1353da824af8ec56ac8a6f66d666b
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Adam Litke <alitke(a)redhat.com>
6 years, 9 months
Change in vdsm[master]: sdm: Add find_domain_manifest helpers
by alitke@redhat.com
Adam Litke has uploaded a new change for review.
Change subject: sdm: Add find_domain_manifest helpers
......................................................................
sdm: Add find_domain_manifest helpers
Change-Id: Ia5d139bde2349c8231141fe842d9ada96fa279fe
Signed-off-by: Adam Litke <alitke(a)redhat.com>
---
M tests/Makefile.am
A tests/domainmanifestfactory_tests.py
M tests/manifest_tests.py
M tests/storagefakelib.py
M tests/storagetestutils.py
M vdsm.spec.in
M vdsm/storage/Makefile.am
M vdsm/storage/blockSD.py
A vdsm/storage/domainManifestFactory.py
M vdsm/storage/glusterSD.py
M vdsm/storage/localFsSD.py
M vdsm/storage/nfsSD.py
12 files changed, 214 insertions(+), 3 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/55/43555/1
diff --git a/tests/Makefile.am b/tests/Makefile.am
index 9cc3692..f116fc4 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -43,6 +43,7 @@
cpuProfileTests.py \
deviceTests.py \
domainDescriptorTests.py \
+ domainmanifestfactory_tests.py \
encodingTests.py \
executorTests.py \
fileSDTests.py \
diff --git a/tests/domainmanifestfactory_tests.py b/tests/domainmanifestfactory_tests.py
new file mode 100644
index 0000000..4c3f63e
--- /dev/null
+++ b/tests/domainmanifestfactory_tests.py
@@ -0,0 +1,124 @@
+# Copyright 2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Refer to the README and COPYING files for full details of the license
+#
+
+import os
+import uuid
+
+from testlib import VdsmTestCase, namedTemporaryDir
+from monkeypatch import MonkeyPatchScope
+from storagefakelib import FakeLVM
+from storagetestutils import make_blocksd_manifest, make_vg, \
+ make_filesd_manifest
+
+from storage import sd, blockSD, fileSD, localFsSD, mount
+from storage import storage_exception as se
+from storage import domainManifestFactory
+
+
+class DomainManifestFactoryTests(VdsmTestCase):
+ def get_patches(self, tmpdir, lvm):
+ return [(blockSD, 'lvm', lvm),
+ (blockSD, 'selectMetadata', lambda x: {}),
+ (sd.StorageDomain, 'storage_repository', tmpdir),
+ (mount, 'isMounted', lambda x: True)]
+
+ def make_mnt(self, tmpdir, link_name):
+ mntdir = os.path.join(tmpdir, sd.DOMAIN_MNT_POINT)
+ os.mkdir(mntdir)
+ os.symlink(tmpdir, os.path.join(mntdir, link_name))
+
+ def test_localfs_layout(self):
+ with namedTemporaryDir() as tmpdir:
+ self.make_mnt(tmpdir, '_localfssd')
+ lvm = FakeLVM(tmpdir)
+ with MonkeyPatchScope(self.get_patches(tmpdir, lvm)):
+ manifest = make_filesd_manifest(tmpdir)
+ found_manifest = domainManifestFactory.produce(
+ manifest.sdUUID, manifest._metadata)
+ self.assertEquals(manifest.sdUUID, found_manifest.sdUUID)
+ self.assertIsInstance(found_manifest,
+ fileSD.FileStorageDomainManifest)
+
+ def test_nfs_layout(self):
+ with namedTemporaryDir() as tmpdir:
+ self.make_mnt(tmpdir, '192.168.1.1:_localfssd')
+ lvm = FakeLVM(tmpdir)
+ with MonkeyPatchScope(self.get_patches(tmpdir, lvm)):
+ manifest = make_filesd_manifest(tmpdir)
+ found_manifest = domainManifestFactory.produce(
+ manifest.sdUUID, manifest._metadata)
+ self.assertEquals(manifest.sdUUID, found_manifest.sdUUID)
+ self.assertIsInstance(found_manifest,
+ fileSD.FileStorageDomainManifest)
+
+ def test_glusterfs_layout(self):
+ with namedTemporaryDir() as tmpdir:
+ mntdir = os.path.join(tmpdir, sd.DOMAIN_MNT_POINT)
+ glusterdir = os.path.join(mntdir, sd.GLUSTERSD_DIR)
+ os.makedirs(glusterdir)
+ os.symlink(tmpdir, os.path.join(glusterdir, 'host:_glustersd'))
+
+ lvm = FakeLVM(tmpdir)
+ with MonkeyPatchScope(self.get_patches(tmpdir, lvm)):
+ manifest = make_filesd_manifest(tmpdir)
+ found_manifest = domainManifestFactory.produce(
+ manifest.sdUUID, manifest._metadata)
+ self.assertEquals(manifest.sdUUID, found_manifest.sdUUID)
+ self.assertEquals(manifest.sdUUID, found_manifest.sdUUID)
+ self.assertIsInstance(found_manifest,
+ fileSD.FileStorageDomainManifest)
+
+ def test_block_layout(self):
+ with namedTemporaryDir() as tmpdir:
+ lvm = FakeLVM(tmpdir)
+ with MonkeyPatchScope(self.get_patches(tmpdir, lvm)):
+ manifest = make_blocksd_manifest()
+ vg_name = make_vg(lvm, manifest)
+ lvm.vgmd[vg_name]['tags'] = [blockSD.STORAGE_DOMAIN_TAG]
+ found_manifest = domainManifestFactory.produce(
+ manifest.sdUUID, manifest._metadata)
+ self.assertEquals(manifest.sdUUID, found_manifest.sdUUID)
+ self.assertIsInstance(found_manifest,
+ blockSD.BlockStorageDomainManifest)
+
+ def test_cached_finder(self):
+ def _trap(unused):
+ raise RuntimeError()
+
+ with namedTemporaryDir() as tmpdir:
+ self.make_mnt(tmpdir, '_localfssd')
+ lvm = FakeLVM(tmpdir)
+ patches = self.get_patches(tmpdir, lvm)
+ with MonkeyPatchScope(patches):
+ manifest = make_filesd_manifest(tmpdir)
+ res1 = domainManifestFactory.produce(
+ manifest.sdUUID, manifest._metadata)
+ patches.append((localFsSD, 'findDomainManifest', _trap))
+ with MonkeyPatchScope(patches):
+ res2 = domainManifestFactory.produce(
+ manifest.sdUUID, manifest._metadata)
+ self.assertEquals(res1.sdUUID, res2.sdUUID)
+
+ def test_not_found(self):
+ sdUUID = str(uuid.uuid4())
+ with namedTemporaryDir() as tmpdir:
+ lvm = FakeLVM(tmpdir)
+ with MonkeyPatchScope(self.get_patches(tmpdir, lvm)):
+ self.assertRaises(se.StorageDomainDoesNotExist,
+ domainManifestFactory.produce, sdUUID)
\ No newline at end of file
diff --git a/tests/manifest_tests.py b/tests/manifest_tests.py
index a22d7cf..967b663 100644
--- a/tests/manifest_tests.py
+++ b/tests/manifest_tests.py
@@ -161,7 +161,6 @@
imguuid = str(uuid.uuid4())
imagepath = manifest.getImagePath(imguuid)
- os.makedirs(os.path.dirname(imagepath))
with open(imagepath, 'w') as f:
f.truncate(0)
self.assertEquals(set(), manifest.getAllImages())
diff --git a/tests/storagefakelib.py b/tests/storagefakelib.py
index cb40fbb..bf79d91 100644
--- a/tests/storagefakelib.py
+++ b/tests/storagefakelib.py
@@ -23,6 +23,7 @@
from copy import deepcopy
from storage import lvm as real_lvm
+from storage import storage_exception as se
class FakeLVM(object):
@@ -154,7 +155,11 @@
return real_lvm.PV(**md)
def getVG(self, vgName):
- vg_md = deepcopy(self.vgmd[vgName])
+ try:
+ vg_md = deepcopy(self.vgmd[vgName])
+ except KeyError:
+ raise se.VolumeGroupDoesNotExist(vgName)
+
vg_attr = real_lvm.VG_ATTR(**vg_md['attr'])
vg_md['attr'] = vg_attr
return real_lvm.VG(**vg_md)
diff --git a/tests/storagetestutils.py b/tests/storagetestutils.py
index ed052af..eae803a 100644
--- a/tests/storagetestutils.py
+++ b/tests/storagetestutils.py
@@ -79,7 +79,7 @@
if metadata is None:
metadata = {sd.DMDK_VERSION: 3}
manifest = fileSD.FileStorageDomainManifest(domain_path, metadata)
- os.makedirs(os.path.join(manifest.domaindir, sduuid, sd.DOMAIN_IMAGES))
+ os.makedirs(os.path.join(manifest.domaindir, sd.DOMAIN_IMAGES))
return manifest
diff --git a/vdsm.spec.in b/vdsm.spec.in
index 4ba9812..57e673e 100644
--- a/vdsm.spec.in
+++ b/vdsm.spec.in
@@ -906,6 +906,7 @@
%{_datadir}/%{vdsm_name}/storage/curlImgWrap.py*
%{_datadir}/%{vdsm_name}/storage/devicemapper.py*
%{_datadir}/%{vdsm_name}/storage/dispatcher.py*
+%{_datadir}/%{vdsm_name}/storage/domainManifestFactory.py
%{_datadir}/%{vdsm_name}/storage/monitor.py*
%{_datadir}/%{vdsm_name}/storage/fileSD.py*
%{_datadir}/%{vdsm_name}/storage/fileUtils.py*
diff --git a/vdsm/storage/Makefile.am b/vdsm/storage/Makefile.am
index a4ec93a..e460518 100644
--- a/vdsm/storage/Makefile.am
+++ b/vdsm/storage/Makefile.am
@@ -31,6 +31,7 @@
curlImgWrap.py \
devicemapper.py \
dispatcher.py \
+ domainManifestFactory.py \
fileSD.py \
fileUtils.py \
fileVolume.py \
diff --git a/vdsm/storage/blockSD.py b/vdsm/storage/blockSD.py
index 121cc6b..6de2737 100644
--- a/vdsm/storage/blockSD.py
+++ b/vdsm/storage/blockSD.py
@@ -1426,5 +1426,10 @@
return BlockStorageDomain(BlockStorageDomain.findDomainPath(sdUUID))
+def findDomainManifest(sdUUID, metadata=None):
+ return BlockStorageDomainManifest(
+ BlockStorageDomain.findDomainPath(sdUUID), metadata)
+
+
def getStorageDomainsList():
return [vg.name for vg in lvm.getAllVGs() if _isSD(vg)]
diff --git a/vdsm/storage/domainManifestFactory.py b/vdsm/storage/domainManifestFactory.py
new file mode 100644
index 0000000..fced9de
--- /dev/null
+++ b/vdsm/storage/domainManifestFactory.py
@@ -0,0 +1,60 @@
+#
+# Copyright 2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Refer to the README and COPYING files for full details of the license
+#
+
+import logging
+
+import blockSD, glusterSD, localFsSD, nfsSD
+import storage_exception as se
+
+
+log = logging.getLogger('Storage.domainManifestFactory')
+
+domain_manifest_producers = {}
+
+
+def produce(sdUUID, metadata=None):
+ log.error("looking for domain manifest %s", sdUUID)
+
+ # Check our cache of finder functions first. Since a domain manifest
+ # cannot change its type we don't have to worry about invalidation.
+ try:
+ producer = domain_manifest_producers[sdUUID]
+ except KeyError:
+ pass
+ else:
+ return producer(sdUUID, metadata)
+
+ # The order is somewhat important, it's ordered
+ # by how quickly get can find the domain. For instance
+ # if an nfs mount is unavailable we will get stuck
+ # until it times out, this should affect fetching
+ # of block\local domains. If for any case in the future
+ # this changes, please update the order.
+ for mod in (blockSD, glusterSD, localFsSD, nfsSD):
+ try:
+ manifest = mod.findDomainManifest(sdUUID, metadata)
+ domain_manifest_producers[sdUUID] = mod.findDomainManifest
+ return manifest
+ except se.StorageDomainDoesNotExist:
+ pass
+ except Exception:
+ log.exception("Error while looking for domain `%s`", sdUUID)
+
+ raise se.StorageDomainDoesNotExist(sdUUID)
diff --git a/vdsm/storage/glusterSD.py b/vdsm/storage/glusterSD.py
index 2931083..a62e242 100644
--- a/vdsm/storage/glusterSD.py
+++ b/vdsm/storage/glusterSD.py
@@ -30,3 +30,8 @@
def findDomain(sdUUID):
return GlusterStorageDomain(GlusterStorageDomain.findDomainPath(sdUUID))
+
+
+def findDomainManifest(sdUUID, metadata=None):
+ return fileSD.FileStorageDomainManifest(
+ GlusterStorageDomain.findDomainPath(sdUUID), metadata)
diff --git a/vdsm/storage/localFsSD.py b/vdsm/storage/localFsSD.py
index 7e11330..b4e0ce0 100644
--- a/vdsm/storage/localFsSD.py
+++ b/vdsm/storage/localFsSD.py
@@ -119,3 +119,8 @@
def findDomain(sdUUID):
return LocalFsStorageDomain(LocalFsStorageDomain.findDomainPath(sdUUID))
+
+
+def findDomainManifest(sdUUID, metadata=None):
+ return fileSD.FileStorageDomainManifest(
+ LocalFsStorageDomain.findDomainPath(sdUUID), metadata)
\ No newline at end of file
diff --git a/vdsm/storage/nfsSD.py b/vdsm/storage/nfsSD.py
index 1f94439..739efc8 100644
--- a/vdsm/storage/nfsSD.py
+++ b/vdsm/storage/nfsSD.py
@@ -120,3 +120,8 @@
def findDomain(sdUUID):
return NfsStorageDomain(NfsStorageDomain.findDomainPath(sdUUID))
+
+
+def findDomainManifest(sdUUID, metadata=None):
+ return fileSD.FileStorageDomainManifest(
+ NfsStorageDomain.findDomainPath(sdUUID), metadata)
--
To view, visit https://gerrit.ovirt.org/43555
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ia5d139bde2349c8231141fe842d9ada96fa279fe
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Adam Litke <alitke(a)redhat.com>
6 years, 9 months
Change in vdsm[master]: sdm: Add decorators for sdm-only functions and their callers
by alitke@redhat.com
Adam Litke has uploaded a new change for review.
Change subject: sdm: Add decorators for sdm-only functions and their callers
......................................................................
sdm: Add decorators for sdm-only functions and their callers
Change-Id: Iaf736144e5640519851dc9175b5f17539d0ce23e
Signed-off-by: Adam Litke <alitke(a)redhat.com>
---
M tests/Makefile.am
A tests/sdm_tests.py
M vdsm.spec.in
M vdsm/storage/Makefile.am
A vdsm/storage/sdm.py
5 files changed, 102 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/56/43556/1
diff --git a/tests/Makefile.am b/tests/Makefile.am
index f116fc4..5907a0d 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -92,6 +92,7 @@
scheduleTests.py \
schemaTests.py \
schemaValidationTest.py \
+ sdm_tests.py \
securableTests.py \
sourceroutingTests.py \
sslhelper.py \
diff --git a/tests/sdm_tests.py b/tests/sdm_tests.py
new file mode 100644
index 0000000..95ae126
--- /dev/null
+++ b/tests/sdm_tests.py
@@ -0,0 +1,43 @@
+#
+# Copyright 2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Refer to the README and COPYING files for full details of the license
+#
+
+from testlib import VdsmTestCase
+
+from storage import sdm
+from storage.threadLocal import vars
+
+
+class RequireSDMTests(VdsmTestCase):
+ def setUp(self):
+ try:
+ delattr(vars, '__sdm__')
+ except AttributeError:
+ pass
+
+ @sdm.sdm_verb
+ def test_allowed(self):
+ self.assertTrue(self.sdm_fn())
+
+ def test_denied(self):
+ self.assertRaises(sdm.SDMFunctionNotCallable, self.sdm_fn)
+
+ @sdm.require_sdm
+ def sdm_fn(self):
+ return True
\ No newline at end of file
diff --git a/vdsm.spec.in b/vdsm.spec.in
index 57e673e..24e0ab7 100644
--- a/vdsm.spec.in
+++ b/vdsm.spec.in
@@ -936,6 +936,7 @@
%{_datadir}/%{vdsm_name}/storage/clusterlock.py*
%{_datadir}/%{vdsm_name}/storage/sdc.py*
%{_datadir}/%{vdsm_name}/storage/sd.py*
+%{_datadir}/%{vdsm_name}/storage/sdm.py*
%{_datadir}/%{vdsm_name}/storage/securable.py*
%{_datadir}/%{vdsm_name}/storage/sp.py*
%{_datadir}/%{vdsm_name}/storage/spbackends.py*
diff --git a/vdsm/storage/Makefile.am b/vdsm/storage/Makefile.am
index e460518..51768ce 100644
--- a/vdsm/storage/Makefile.am
+++ b/vdsm/storage/Makefile.am
@@ -59,6 +59,7 @@
resourceManager.py \
sdc.py \
sd.py \
+ sdm.py \
securable.py \
sp.py \
spbackends.py \
diff --git a/vdsm/storage/sdm.py b/vdsm/storage/sdm.py
new file mode 100644
index 0000000..97e599f
--- /dev/null
+++ b/vdsm/storage/sdm.py
@@ -0,0 +1,56 @@
+#
+# Copyright 2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Refer to the README and COPYING files for full details of the license
+#
+
+from functools import wraps
+from storage.threadLocal import vars
+
+
+class SDMFunctionNotCallable(Exception):
+ pass
+
+
+def require_sdm(f):
+ """
+ SDM method decorator.
+
+ This decorator is used to mark methods that can only be called when the
+ storage is operating in SDM mode (ie. without SPM).
+ """
+ @wraps(f)
+ def wrapper(*args, **kwds):
+ if getattr(vars, '__sdm__', False):
+ return f(*args, **kwds)
+ else:
+ raise SDMFunctionNotCallable(f.__name__)
+ return wrapper
+
+
+def sdm_verb(f):
+ """
+ SDM verb decorator
+
+ This decorator indicates that a function is designed to work without SPM
+ and is approved to access SDM-only functions.
+ """
+ @wraps(f)
+ def wrapper(*args, **kwds):
+ vars.__sdm__ = True
+ return f(*args, **kwds)
+ return wrapper
--
To view, visit https://gerrit.ovirt.org/43556
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Iaf736144e5640519851dc9175b5f17539d0ce23e
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Adam Litke <alitke(a)redhat.com>
6 years, 9 months
Change in vdsm[master]: storage: Factor apparentsize adjustment into a reusable func...
by alitke@redhat.com
Adam Litke has uploaded a new change for review.
Change subject: storage: Factor apparentsize adjustment into a reusable function
......................................................................
storage: Factor apparentsize adjustment into a reusable function
When creating a new volume sometimes the actual size is larger than the
requested size due to underlying storage limitations (such as physical
extent granularity in LVM). In these cases, the new volume's size must
be adjusted to match the actual size. The SDM volume creation code
would also like to use this function so extract it into the
VolumeMetadata class.
Change-Id: I7395850e127b941fc50ae8d5659868a2499b5076
Signed-off-by: Adam Litke <alitke(a)redhat.com>
---
M vdsm/storage/volume.py
1 file changed, 28 insertions(+), 18 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/44/44044/1
diff --git a/vdsm/storage/volume.py b/vdsm/storage/volume.py
index f7e5bc0..723b3d9 100644
--- a/vdsm/storage/volume.py
+++ b/vdsm/storage/volume.py
@@ -495,6 +495,31 @@
puuid)
return None
+ @classmethod
+ def adjust_new_volume_size(cls, dom_manifest, img_id, vol_id, req_size,
+ vol_format):
+ # When the volume format is raw what the guest sees is the apparent
+ # size of the file/device therefore if the requested size doesn't
+ # match the apparent size (eg: physical extent granularity in LVM)
+ # we need to update the size value so that the metadata reflects
+ # the correct state.
+ if vol_format == RAW_FORMAT:
+ apparentSize = int(dom_manifest.getVSize(img_id, vol_id) /
+ BLOCK_SIZE)
+ if apparentSize < req_size:
+ cls.log.error("The volume %s apparent size %s is smaller "
+ "than the requested size %s",
+ vol_id, apparentSize, req_size)
+ raise se.VolumeCreationError()
+ if apparentSize > req_size:
+ cls.log.info("The requested size for volume %s doesn't "
+ "match the granularity on domain %s, "
+ "updating the volume size from %s to %s",
+ vol_id, dom_manifest.sdUUID, req_size,
+ apparentSize)
+ return apparentSize
+ return req_size
+
class Volume(object):
log = logging.getLogger('Storage.Volume')
@@ -850,24 +875,9 @@
cls.log.error("Failed to create volume %s: %s", volPath, e)
vars.task.popRecovery()
raise
- # When the volume format is raw what the guest sees is the apparent
- # size of the file/device therefore if the requested size doesn't
- # match the apparent size (eg: physical extent granularity in LVM)
- # we need to update the size value so that the metadata reflects
- # the correct state.
- if volFormat == RAW_FORMAT:
- apparentSize = int(dom.getVSize(imgUUID, volUUID) / BLOCK_SIZE)
- if apparentSize < size:
- cls.log.error("The volume %s apparent size %s is smaller "
- "than the requested size %s",
- volUUID, apparentSize, size)
- raise se.VolumeCreationError()
- if apparentSize > size:
- cls.log.info("The requested size for volume %s doesn't "
- "match the granularity on domain %s, "
- "updating the volume size from %s to %s",
- volUUID, sdUUID, size, apparentSize)
- size = apparentSize
+
+ size = VolumeMetadata.adjust_new_volume_size(dom, imgUUID, volUUID,
+ size, volFormat)
vars.task.pushRecovery(
task.Recovery("Create volume metadata rollback", clsModule,
--
To view, visit https://gerrit.ovirt.org/44044
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I7395850e127b941fc50ae8d5659868a2499b5076
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Adam Litke <alitke(a)redhat.com>
6 years, 9 months
Change in vdsm[master]: storage: Rename _share to share_to_image
by alitke@redhat.com
Adam Litke has uploaded a new change for review.
Change subject: storage: Rename _share to share_to_image
......................................................................
storage: Rename _share to share_to_image
When a new volume is created whose parent is a template volume, that
template must be "shared" into the new volume's image since volume
chains only link volumes in the same image. This logic is reusable by
the SDM create volume flow but _share should be renamed to
share_to_image to remove the _ which designates this as a protected
method.
Change-Id: I44c7fe43ada38aa97bbd9c08438a0ee686680330
Signed-off-by: Adam Litke <alitke(a)redhat.com>
---
M vdsm/storage/blockVolume.py
M vdsm/storage/fileVolume.py
M vdsm/storage/volume.py
3 files changed, 3 insertions(+), 3 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/46/44046/1
diff --git a/vdsm/storage/blockVolume.py b/vdsm/storage/blockVolume.py
index 47336b9..23e636e 100644
--- a/vdsm/storage/blockVolume.py
+++ b/vdsm/storage/blockVolume.py
@@ -314,7 +314,7 @@
def refreshVolume(self):
lvm.refreshLVs(self.sdUUID, (self.volUUID,))
- def _share(self, dstImgPath):
+ def share_to_image(self, dstImgPath):
"""
Share this volume to dstImgPath
"""
diff --git a/vdsm/storage/fileVolume.py b/vdsm/storage/fileVolume.py
index 15ca05b..c5b8667 100644
--- a/vdsm/storage/fileVolume.py
+++ b/vdsm/storage/fileVolume.py
@@ -300,7 +300,7 @@
os.path.join(dstImgPath, self.volUUID))
self.oop.utils.forceLink(self._getLeaseVolumePath(), dstLeasePath)
- def _share(self, dstImgPath):
+ def share_to_image(self, dstImgPath):
"""
Share this volume to dstImgPath, including the metadata and the lease
"""
diff --git a/vdsm/storage/volume.py b/vdsm/storage/volume.py
index 0d61643..951efe0 100644
--- a/vdsm/storage/volume.py
+++ b/vdsm/storage/volume.py
@@ -598,7 +598,7 @@
self.md.removeMetadata()
def _share(self, dstImgPath):
- return self.md._share(dstImgPath)
+ return self.md.share_to_image(dstImgPath)
@classmethod
def _getModuleAndClass(cls):
--
To view, visit https://gerrit.ovirt.org/44046
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I44c7fe43ada38aa97bbd9c08438a0ee686680330
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Adam Litke <alitke(a)redhat.com>
6 years, 9 months
Change in vdsm[master]: storage: Rename getParent to getParentId
by alitke@redhat.com
Adam Litke has uploaded a new change for review.
Change subject: storage: Rename getParent to getParentId
......................................................................
storage: Rename getParent to getParentId
The Volume and VolumeMetadata classes have functions to return the
parent UUID and parent volume instance. It's a bit unclear that
getParent returns the ID so let's make this more explicit by renaming
getParent to getParentId.
Change-Id: Ie310d731709ebf1ba3589e92a7ae506ccdc8ae22
Signed-off-by: Adam Litke <alitke(a)redhat.com>
---
M vdsm/storage/blockVolume.py
M vdsm/storage/fileVolume.py
M vdsm/storage/image.py
M vdsm/storage/sp.py
M vdsm/storage/volume.py
5 files changed, 20 insertions(+), 20 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/47/44047/1
diff --git a/vdsm/storage/blockVolume.py b/vdsm/storage/blockVolume.py
index 23e636e..0168d64 100644
--- a/vdsm/storage/blockVolume.py
+++ b/vdsm/storage/blockVolume.py
@@ -167,7 +167,7 @@
def getParentMeta(self):
return self.getMetaParam(volume.PUUID)
- def getParent(self):
+ def getParentId(self):
"""
Return parent volume UUID
"""
@@ -490,7 +490,7 @@
try:
# We need to blank parent record in our metadata
# for parent to become leaf successfully.
- puuid = self.getParent()
+ puuid = self.getParentId()
self.setParent(volume.BLANK_UUID)
if puuid and puuid != volume.BLANK_UUID:
pvol = BlockVolume(self.repoPath, self.sdUUID, self.imgUUID,
diff --git a/vdsm/storage/fileVolume.py b/vdsm/storage/fileVolume.py
index c5b8667..2dad91f 100644
--- a/vdsm/storage/fileVolume.py
+++ b/vdsm/storage/fileVolume.py
@@ -150,7 +150,7 @@
return out
- def getParent(self):
+ def getParentId(self):
"""
Return parent volume UUID
"""
@@ -452,7 +452,7 @@
try:
# We need to blank parent record in our metadata
# for parent to become leaf successfully.
- puuid = self.getParent()
+ puuid = self.getParentId()
self.setParent(volume.BLANK_UUID)
if puuid and puuid != volume.BLANK_UUID:
pvol = FileVolume(self.repoPath, self.sdUUID,
diff --git a/vdsm/storage/image.py b/vdsm/storage/image.py
index fa0770c..43e08e5 100644
--- a/vdsm/storage/image.py
+++ b/vdsm/storage/image.py
@@ -232,7 +232,7 @@
chain.insert(0, srcVol)
seen.add(srcVol.volUUID)
- parentUUID = srcVol.getParent()
+ parentUUID = srcVol.getParentId()
if parentUUID == volume.BLANK_UUID:
break
@@ -578,7 +578,7 @@
raise se.VolumeNotSparse()
srcVolume = self._getSparsifyVolume(tmpSdUUID, tmpImgUUID,
- tmpVolume.getParent())
+ tmpVolume.getParentId())
tmpVolume.prepare()
try:
@@ -898,7 +898,7 @@
successor = chain[-1]
tmpVol = volclass(self.repoPath, sdDom.sdUUID, imgUUID, successor)
dstParent = volclass(self.repoPath, sdDom.sdUUID, imgUUID,
- ancestor).getParent()
+ ancestor).getParentId()
# Mark all volumes as illegal
while tmpVol and dstParent != tmpVol.volUUID:
@@ -926,7 +926,7 @@
successor = chain[-1]
srcVol = volclass(self.repoPath, sdUUID, imgUUID, successor)
dstParent = volclass(self.repoPath, sdUUID, imgUUID,
- ancestor).getParent()
+ ancestor).getParentId()
while srcVol and dstParent != srcVol.volUUID:
try:
@@ -953,7 +953,7 @@
successor = chain[-1]
srcVol = volclass(self.repoPath, sdDom.sdUUID, imgUUID, successor)
dstParent = volclass(self.repoPath, sdDom.sdUUID, imgUUID,
- ancestor).getParent()
+ ancestor).getParentId()
while srcVol and dstParent != srcVol.volUUID:
self.log.info("Remove volume %s from image %s", srcVol.volUUID,
@@ -1143,12 +1143,12 @@
"""
chain = []
accumulatedChainSize = 0
- endVolName = vols[ancestor].getParent() # TemplateVolName or None
+ endVolName = vols[ancestor].getParentId() # TemplateVolName or None
currVolName = successor
while (currVolName != endVolName):
chain.insert(0, currVolName)
accumulatedChainSize += vols[currVolName].getVolumeSize()
- currVolName = vols[currVolName].getParent()
+ currVolName = vols[currVolName].getParentId()
return accumulatedChainSize, chain
@@ -1169,7 +1169,7 @@
self.log.debug("unlinking subchain: %s" % subChain)
sdDom = sdCache.produce(sdUUID=sdUUID)
- dstParent = sdDom.produceVolume(imgUUID, subChain[0]).getParent()
+ dstParent = sdDom.produceVolume(imgUUID, subChain[0]).getParentId()
subChainTailVol = sdDom.produceVolume(imgUUID, subChain[-1])
if subChainTailVol.isLeaf():
self.log.debug("Leaf volume is being removed from the chain. "
@@ -1247,10 +1247,10 @@
srcVolParams = srcVol.getVolumeParams()
srcVolParams['children'] = []
for vName, vol in vols.iteritems():
- if vol.getParent() == successor:
+ if vol.getParentId() == successor:
srcVolParams['children'].append(vol)
dstVol = vols[ancestor]
- dstParentUUID = dstVol.getParent()
+ dstParentUUID = dstVol.getParentId()
if dstParentUUID != sd.BLANK_UUID:
volParams = vols[dstParentUUID].getVolumeParams()
else:
diff --git a/vdsm/storage/sp.py b/vdsm/storage/sp.py
index 8574bac..b92f107 100644
--- a/vdsm/storage/sp.py
+++ b/vdsm/storage/sp.py
@@ -1838,7 +1838,7 @@
volUUID=srcVolUUID)
if not srcVol.isShared():
- if srcVol.getParent() == volume.BLANK_UUID:
+ if srcVol.getParentId() == volume.BLANK_UUID:
with rmanager.acquireResource(imageResourcesNamespace,
srcImgUUID,
rm.LockType.exclusive):
diff --git a/vdsm/storage/volume.py b/vdsm/storage/volume.py
index 951efe0..691d3b9 100644
--- a/vdsm/storage/volume.py
+++ b/vdsm/storage/volume.py
@@ -250,7 +250,7 @@
"disktype": meta.get(DISKTYPE, ""),
"voltype": meta.get(VOLTYPE, ""),
"size": int(meta.get(SIZE, "0")),
- "parent": self.getParent(),
+ "parent": self.getParentId(),
"description": meta.get(DESCRIPTION, ""),
"pool": meta.get(sd.DMDK_POOLS, ""),
"domain": meta.get(DOMAIN, ""),
@@ -310,7 +310,7 @@
volParams['size'] = self.getSize()
volParams['apparentsize'] = self.getVolumeSize(bs=bs)
volParams['truesize'] = self.getVolumeTrueSize(bs=bs)
- volParams['parent'] = self.getParent()
+ volParams['parent'] = self.getParentId()
volParams['descr'] = self.getDescription()
volParams['legality'] = self.getLegality()
return volParams
@@ -494,7 +494,7 @@
"""
Return parent volume object
"""
- puuid = self.getParent()
+ puuid = self.getParentId()
if puuid and puuid != BLANK_UUID:
return sdCache.produce(self.sdUUID).produceVolume(self.imgUUID,
puuid)
@@ -571,11 +571,11 @@
"""
return self.md.getMetadata(metaId)
- def getParent(self):
+ def getParentId(self):
"""
Return parent volume UUID
"""
- return self.md.getParent()
+ return self.md.getParentId()
def getChildren(self):
""" Return children volume UUIDs.
--
To view, visit https://gerrit.ovirt.org/44047
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ie310d731709ebf1ba3589e92a7ae506ccdc8ae22
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Adam Litke <alitke(a)redhat.com>
6 years, 9 months
Change in vdsm[master]: storage: Rename getParentVolume* to produceParent
by alitke@redhat.com
Adam Litke has uploaded a new change for review.
Change subject: storage: Rename getParentVolume* to produceParent
......................................................................
storage: Rename getParentVolume* to produceParent
The Volume object provides getParentVolume and the VolumeMetadata object
provides getParentVolumeMetadata. Both of these functions return an
instance of their same type which represents this volume's parent.
Since the logic is the same and both object types implement compatible
interfaces, let's unify around the name produceParent. Once complete,
consumers of these objects can use either type without additional
modifications.
Change-Id: I4553e5bcc8a576abc6fb472148f744c66e5012ef
Signed-off-by: Adam Litke <alitke(a)redhat.com>
---
M vdsm/storage/image.py
M vdsm/storage/resourceFactories.py
M vdsm/storage/volume.py
3 files changed, 16 insertions(+), 16 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/48/44048/1
diff --git a/vdsm/storage/image.py b/vdsm/storage/image.py
index 43e08e5..bd79f6e 100644
--- a/vdsm/storage/image.py
+++ b/vdsm/storage/image.py
@@ -168,7 +168,7 @@
"""
chain = self.getChain(sdUUID, imgUUID, volUUID)
newsize = 0
- template = chain[0].getParentVolume()
+ template = chain[0].produceParent()
if template:
newsize = template.getVolumeSize()
for vol in chain:
@@ -241,7 +241,7 @@
imgUUID, srcVol.volUUID, parentUUID)
raise se.ImageIsNotLegalChain(imgUUID)
- srcVol = srcVol.getParentVolume()
+ srcVol = srcVol.produceParent()
self.log.info("sdUUID=%s imgUUID=%s chain=%s ", sdUUID, imgUUID, chain)
return chain
@@ -254,7 +254,7 @@
# Find all volumes of image (excluding template)
chain = self.getChain(sdUUID, imgUUID)
# check if the chain is build above a template, or it is a standalone
- pvol = chain[0].getParentVolume()
+ pvol = chain[0].produceParent()
if pvol:
tmpl = pvol
elif chain[0].isShared():
@@ -351,7 +351,7 @@
fakeTemplate = False
pimg = volume.BLANK_UUID # standalone chain
# check if the chain is build above a template, or it is a standalone
- pvol = srcChain[0].getParentVolume()
+ pvol = srcChain[0].produceParent()
if pvol:
# find out parent volume parameters
volParams = pvol.getVolumeParams()
@@ -454,7 +454,7 @@
srcFormat = volume.fmt2str(srcVol.getFormat())
dstFormat = volume.fmt2str(dstVol.getFormat())
- parentVol = dstVol.getParentVolume()
+ parentVol = dstVol.produceParent()
if parentVol is not None:
backing = volume.getBackingVolumePath(
@@ -731,7 +731,7 @@
raise se.ImageIsNotLegalChain(imgUUID)
chain = self.getChain(sdUUID, imgUUID)
# check if the chain is build above a template, or it is a standalone
- pvol = chain[0].getParentVolume()
+ pvol = chain[0].produceParent()
if pvol:
if not pvol.isLegal() or pvol.isFake():
raise se.ImageIsNotLegalChain(imgUUID)
@@ -902,7 +902,7 @@
# Mark all volumes as illegal
while tmpVol and dstParent != tmpVol.volUUID:
- vol = tmpVol.getParentVolume()
+ vol = tmpVol.produceParent()
tmpVol.setLegality(volume.ILLEGAL_VOL)
tmpVol = vol
@@ -932,7 +932,7 @@
try:
self.log.info("Teardown volume %s from image %s",
srcVol.volUUID, imgUUID)
- vol = srcVol.getParentVolume()
+ vol = srcVol.produceParent()
srcVol.teardown(sdUUID=srcVol.sdUUID, volUUID=srcVol.volUUID,
justme=True)
srcVol = vol
@@ -958,7 +958,7 @@
while srcVol and dstParent != srcVol.volUUID:
self.log.info("Remove volume %s from image %s", srcVol.volUUID,
imgUUID)
- vol = srcVol.getParentVolume()
+ vol = srcVol.produceParent()
srcVol.delete(postZero=postZero, force=True)
chain.remove(srcVol.volUUID)
srcVol = vol
@@ -1321,7 +1321,7 @@
def _activateVolumeForImportExport(self, domain, imgUUID, volUUID=None):
chain = self.getChain(domain.sdUUID, imgUUID, volUUID)
- template = chain[0].getParentVolume()
+ template = chain[0].produceParent()
if template or len(chain) > 1:
self.log.error("Importing and exporting an image with more "
diff --git a/vdsm/storage/resourceFactories.py b/vdsm/storage/resourceFactories.py
index 98c362b..fb2522d 100644
--- a/vdsm/storage/resourceFactories.py
+++ b/vdsm/storage/resourceFactories.py
@@ -126,7 +126,7 @@
return []
# check if the chain is build above a template, or it is a standalone
- pvol = chain[0].getParentVolume()
+ pvol = chain[0].produceParent()
if pvol:
template = pvol.volUUID
elif chain[0].isShared():
diff --git a/vdsm/storage/volume.py b/vdsm/storage/volume.py
index 691d3b9..a098eb7 100644
--- a/vdsm/storage/volume.py
+++ b/vdsm/storage/volume.py
@@ -490,7 +490,7 @@
"""
pass # Do not remove this method or the V3 upgrade will fail.
- def getParentVolume(self):
+ def produceParent(self):
"""
Return parent volume object
"""
@@ -659,7 +659,7 @@
Rebase volume on top of new backing volume
"""
if rollback:
- pvol = self.getParentVolume()
+ pvol = self.produceParent()
if not pvol:
self.log.warn("Can't rebase volume %s, parent missing",
self.volUUID)
@@ -787,7 +787,7 @@
"volUUID=%s imageDir=%s" %
(repoPath, sdUUID, imgUUID, volUUID, imageDir))
vol = sdCache.produce(sdUUID).produceVolume(imgUUID, volUUID)
- pvol = vol.getParentVolume()
+ pvol = vol.produceParent()
# Remove volume
vol.delete(postZero=False, force=True)
if len(cls.getImageVolumes(repoPath, sdUUID, imgUUID)):
@@ -1120,7 +1120,7 @@
try:
if justme:
return True
- pvol = self.getParentVolume()
+ pvol = self.produceParent()
if pvol:
pvol.prepare(rw=chainrw, justme=False,
chainrw=chainrw, setrw=setrw)
@@ -1149,7 +1149,7 @@
def getInfo(self):
return self.md.getInfo()
- def getParentVolume(self):
+ def produceParent(self):
"""
Return parent volume object
"""
--
To view, visit https://gerrit.ovirt.org/44048
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I4553e5bcc8a576abc6fb472148f744c66e5012ef
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Adam Litke <alitke(a)redhat.com>
6 years, 9 months
Change in vdsm[master]: ImageManifest: move getChain
by alitke@redhat.com
Adam Litke has uploaded a new change for review.
Change subject: ImageManifest: move getChain
......................................................................
ImageManifest: move getChain
The implementation of getChain instantiates storagedomain and volume
objects. This must be fixed. We must fix all callers of getChain to
work with VolumeMetadata objects instead of Volume objects.
Change-Id: Iffd24be3ba74ccdea8dfc744b579c0cf63771cdb
Signed-off-by: Adam Litke <alitke(a)redhat.com>
---
M vdsm/storage/image.py
1 file changed, 71 insertions(+), 66 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/49/44049/1
diff --git a/vdsm/storage/image.py b/vdsm/storage/image.py
index bd79f6e..f852f3c 100644
--- a/vdsm/storage/image.py
+++ b/vdsm/storage/image.py
@@ -89,6 +89,8 @@
class ImageManifest(object):
+ log = logging.getLogger('Storage.ImageManifest')
+
def __init__(self, repoPath):
self.repoPath = repoPath
@@ -105,6 +107,74 @@
log.info("Create placeholder %s for image's volumes", img_dir)
os.mkdir(img_dir)
return img_dir
+
+ def getChain(self, sdUUID, imgUUID, volUUID=None):
+ """
+ Return the chain of volumes of image as a sorted list
+ (not including a shared base (template) if any)
+ """
+ chain = []
+ volclass = sdCache.produce(sdUUID).getVolumeClass()
+
+ # Use volUUID when provided
+ if volUUID:
+ srcVol = volclass(self.repoPath, sdUUID, imgUUID, volUUID)
+
+ # For template images include only one volume (the template itself)
+ # NOTE: this relies on the fact that in a template there is only
+ # one volume
+ if srcVol.isShared():
+ return [srcVol]
+
+ # Find all the volumes when volUUID is not provided
+ else:
+ # Find all volumes of image
+ uuidlist = volclass.getImageVolumes(self.repoPath, sdUUID, imgUUID)
+
+ if not uuidlist:
+ raise se.ImageDoesNotExistInSD(imgUUID, sdUUID)
+
+ srcVol = volclass(self.repoPath, sdUUID, imgUUID, uuidlist[0])
+
+ # For template images include only one volume (the template itself)
+ if len(uuidlist) == 1 and srcVol.isShared():
+ return [srcVol]
+
+ # Searching for the leaf
+ for vol in uuidlist:
+ srcVol = volclass(self.repoPath, sdUUID, imgUUID, vol)
+
+ if srcVol.isLeaf():
+ break
+
+ srcVol = None
+
+ if not srcVol:
+ self.log.error("There is no leaf in the image %s", imgUUID)
+ raise se.ImageIsNotLegalChain(imgUUID)
+
+ # We have seen corrupted chains that cause endless loops here.
+ # https://bugzilla.redhat.com/1125197
+ seen = set()
+
+ # Build up the sorted parent -> child chain
+ while not srcVol.isShared():
+ chain.insert(0, srcVol)
+ seen.add(srcVol.volUUID)
+
+ parentUUID = srcVol.getParentId()
+ if parentUUID == volume.BLANK_UUID:
+ break
+
+ if parentUUID in seen:
+ self.log.error("Image %s volume %s has invalid parent UUID %s",
+ imgUUID, srcVol.volUUID, parentUUID)
+ raise se.ImageIsNotLegalChain(imgUUID)
+
+ srcVol = srcVol.produceParent()
+
+ self.log.info("sdUUID=%s imgUUID=%s chain=%s ", sdUUID, imgUUID, chain)
+ return chain
class Image:
@@ -179,72 +249,7 @@
return newsize
def getChain(self, sdUUID, imgUUID, volUUID=None):
- """
- Return the chain of volumes of image as a sorted list
- (not including a shared base (template) if any)
- """
- chain = []
- volclass = sdCache.produce(sdUUID).getVolumeClass()
-
- # Use volUUID when provided
- if volUUID:
- srcVol = volclass(self.repoPath, sdUUID, imgUUID, volUUID)
-
- # For template images include only one volume (the template itself)
- # NOTE: this relies on the fact that in a template there is only
- # one volume
- if srcVol.isShared():
- return [srcVol]
-
- # Find all the volumes when volUUID is not provided
- else:
- # Find all volumes of image
- uuidlist = volclass.getImageVolumes(self.repoPath, sdUUID, imgUUID)
-
- if not uuidlist:
- raise se.ImageDoesNotExistInSD(imgUUID, sdUUID)
-
- srcVol = volclass(self.repoPath, sdUUID, imgUUID, uuidlist[0])
-
- # For template images include only one volume (the template itself)
- if len(uuidlist) == 1 and srcVol.isShared():
- return [srcVol]
-
- # Searching for the leaf
- for vol in uuidlist:
- srcVol = volclass(self.repoPath, sdUUID, imgUUID, vol)
-
- if srcVol.isLeaf():
- break
-
- srcVol = None
-
- if not srcVol:
- self.log.error("There is no leaf in the image %s", imgUUID)
- raise se.ImageIsNotLegalChain(imgUUID)
-
- # We have seen corrupted chains that cause endless loops here.
- # https://bugzilla.redhat.com/1125197
- seen = set()
-
- # Build up the sorted parent -> child chain
- while not srcVol.isShared():
- chain.insert(0, srcVol)
- seen.add(srcVol.volUUID)
-
- parentUUID = srcVol.getParentId()
- if parentUUID == volume.BLANK_UUID:
- break
-
- if parentUUID in seen:
- self.log.error("Image %s volume %s has invalid parent UUID %s",
- imgUUID, srcVol.volUUID, parentUUID)
- raise se.ImageIsNotLegalChain(imgUUID)
-
- srcVol = srcVol.produceParent()
-
- self.log.info("sdUUID=%s imgUUID=%s chain=%s ", sdUUID, imgUUID, chain)
- return chain
+ return self.manifest.getChain(sdUUID, imgUUID, volUUID)
def getTemplate(self, sdUUID, imgUUID):
"""
--
To view, visit https://gerrit.ovirt.org/44049
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Iffd24be3ba74ccdea8dfc744b579c0cf63771cdb
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Adam Litke <alitke(a)redhat.com>
6 years, 9 months
Change in vdsm[master]: fix: validate volumemetadata objects
by alitke@redhat.com
Adam Litke has uploaded a new change for review.
Change subject: fix: validate volumemetadata objects
......................................................................
fix: validate volumemetadata objects
Change-Id: I8330bb23c8109cee472904bde63caa75d10c0240
Signed-off-by: Adam Litke <alitke(a)redhat.com>
---
M vdsm/storage/volume.py
1 file changed, 1 insertion(+), 1 deletion(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/65/44565/1
diff --git a/vdsm/storage/volume.py b/vdsm/storage/volume.py
index a8211a2..d8cd3fb 100644
--- a/vdsm/storage/volume.py
+++ b/vdsm/storage/volume.py
@@ -142,6 +142,7 @@
raise se.InvalidParameterException("imgUUID", imgUUID)
if not volUUID or volUUID == BLANK_UUID:
raise se.InvalidParameterException("volUUID", volUUID)
+ self.validate()
@property
def imagePath(self):
@@ -176,7 +177,6 @@
def __init__(self, md):
self.md = md
- self.md.validate()
@property
def sdUUID(self):
--
To view, visit https://gerrit.ovirt.org/44565
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I8330bb23c8109cee472904bde63caa75d10c0240
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Adam Litke <alitke(a)redhat.com>
6 years, 9 months