Hello Adam Litke, Nir Soffer,
I'd like you to do a code review. Please visit
https://gerrit.ovirt.org/64985
to review the following change.
Change subject: Live Merge: Teardown volume on HSM after live merge ......................................................................
Live Merge: Teardown volume on HSM after live merge
If a VM is running on an HSM host and a live merge is performed, the LV isn't deactivated, because deactivation is done when deleting the volume. However, deleting the volume is done on the SPM, which means the LV is not deactivated on the HSM host. In this patch, logic is added to tear down the volume after the live merge has completed.
Change-Id: Iec3b6adb50293d8c98f5d8726d668eb272d16549 Bug-Url: https://bugzilla.redhat.com/1377849 Signed-off-by: Ala Hino ahino@redhat.com Reviewed-on: https://gerrit.ovirt.org/64301 Reviewed-by: Nir Soffer nsoffer@redhat.com Continuous-Integration: Nir Soffer nsoffer@redhat.com Continuous-Integration: Jenkins CI Reviewed-by: Adam Litke alitke@redhat.com --- M vdsm/storage/blockSD.py M vdsm/storage/sd.py M vdsm/virt/vm.py 3 files changed, 32 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/85/64985/1
diff --git a/vdsm/storage/blockSD.py b/vdsm/storage/blockSD.py index 8de13b1..58d2507 100644 --- a/vdsm/storage/blockSD.py +++ b/vdsm/storage/blockSD.py @@ -799,6 +799,21 @@ if preallocate == sc.SPARSE_VOL and volFormat == sc.RAW_FORMAT: raise se.IncorrectFormat(sc.type2name(volFormat))
def getVolumeLease(self, imgUUID, volUUID):
    """
    Return the volume lease for (imgUUID, volUUID) as a
    clusterlock.Lease tuple of (name, path, offset).

    On a domain without volume leases, an empty
    Lease(None, None, None) is returned instead.
    """
    if not self.hasVolumeLeases():
        return clusterlock.Lease(None, None, None)
    # The lease slot is derived from the volume's metadata slot.
    # TODO: use the sanlock specific offset when present
    meta_slot = self.produceVolume(imgUUID, volUUID).getMetaOffset()
    lease_offset = ((meta_slot + blockVolume.RESERVED_LEASES)
                    * self.logBlkSize * sd.LEASE_BLOCKS)
    return clusterlock.Lease(volUUID, self.getLeasesFilePath(),
                             lease_offset)

def teardownVolume(self, imgUUID, volUUID):
    # Deactivate the LV backing the volume. Needed on HSM hosts,
    # where volume deletion (performed on the SPM) does not
    # deactivate the local LV.
    lvm.deactivateLVs(self.sdUUID, [volUUID])
class BlockStorageDomain(sd.StorageDomain): manifestClass = BlockStorageDomainManifest diff --git a/vdsm/storage/sd.py b/vdsm/storage/sd.py index 2b151a6..d004794 100644 --- a/vdsm/storage/sd.py +++ b/vdsm/storage/sd.py @@ -487,6 +487,14 @@ if preallocate is not None and preallocate not in sc.VOL_TYPE: raise se.IncorrectType(preallocate)
def teardownVolume(self, imgUUID, volUUID):
    """
    Tear down a volume that was detached from a prepared image during
    the live merge flow. Such a volume will not be torn down together
    with the image when the image itself is torn down.

    This base implementation does nothing; storage domain subclasses
    override it when per-volume teardown work is required.
    """
class StorageDomain(object): log = logging.getLogger("Storage.StorageDomain") diff --git a/vdsm/virt/vm.py b/vdsm/virt/vm.py index a896f58..22e656c 100644 --- a/vdsm/virt/vm.py +++ b/vdsm/virt/vm.py @@ -66,6 +66,7 @@ from vdsm.virt.utils import isVdsmImage, cleanup_guest_socket from storage import outOfProcess as oop from storage import sd +from storage import sdc
# local imports # In future those should be imported via .. @@ -4936,6 +4937,12 @@ self.drive.imageID, baseVolUUID, topVolInfo['capacity'])
+ def teardown_top_volume(self): + # TODO move this method to storage public API + sd_manifest = sdc.sdCache.produce_manifest(self.drive.domainID) + sd_manifest.teardownVolume(self.drive.imageID, + self.job['topVolume']) + @utils.traceback() def run(self): self.update_base_size() @@ -4946,6 +4953,8 @@ self.vm._syncVolumeChain(self.drive) if self.doPivot: self.vm.startDisksStatsCollection() + self.vm.enableDriveMonitor() + self.teardown_top_volume() self.success = True self.vm.log.info("Synchronization completed (job %s)", self.job['jobID'])