Federico Simoncelli has uploaded a new change for review.
Change subject: block: add reduceStorageDomain command
......................................................................
block: add reduceStorageDomain command
Change-Id: I467fc12d3787929b9c0e35f8806402f72f493368
Signed-off-by: Federico Simoncelli <fsimonce(a)redhat.com>
---
M client/vdsClient.py
M vdsm/API.py
M vdsm/BindingXMLRPC.py
M vdsm/storage/blockSD.py
M vdsm/storage/hsm.py
M vdsm/storage/lvm.py
M vdsm/storage/sp.py
M vdsm/storage/storage_exception.py
M vdsm_api/vdsmapi-schema.json
9 files changed, 86 insertions(+), 1 deletion(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/74/26574/1
diff --git a/client/vdsClient.py b/client/vdsClient.py
index b324eb1..ccbdb15 100644
--- a/client/vdsClient.py
+++ b/client/vdsClient.py
@@ -678,6 +678,15 @@
             return dom['status']['code'], dom['status']['message']
         return 0, ''
 
+    def reduceStorageDomain(self, args):
+        sdUUID = args[0]
+        spUUID = args[1]
+        devList = args[2].split(',')
+        dom = self.s.reduceStorageDomain(sdUUID, spUUID, devList)
+        if dom['status']['code']:
+            return dom['status']['code'], dom['status']['message']
+        return 0, ''
+
     def discoverST(self, args):
         portal = args[0].split(":")
         ip = portal[0]
@@ -2067,6 +2076,11 @@
             'Extend the Storage Domain by adding devices'
             ' devlist (list of dev GUIDs)'
         )),
+    'reduceStorageDomain': (serv.reduceStorageDomain, (
+        '<sdUUID> <spUUID> <devlist>',
+        'Reduce the Storage Domain by removing devices devlist (list of '
+        'dev GUIDs)'
+    )),
     'discoverST': (serv.discoverST,
                    ('ip[:port] [username password]',
                     'Discover the available iSCSI targetnames on a '
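
As a usage note: from the shell the verb takes a comma-separated device
list, e.g. "vdsClient -s 0 reduceStorageDomain <sdUUID> <spUUID>
guid1,guid2". Below is a minimal sketch of the equivalent raw XML-RPC
call; it assumes a vdsm endpoint reachable on the standard port 54321
(shown without SSL for brevity) and uses placeholder UUIDs and GUIDs:

    import xmlrpclib  # Python 2, matching the codebase

    server = xmlrpclib.ServerProxy('http://localhost:54321')
    res = server.reduceStorageDomain(
        '5b7bf59e-bd56-4e55-9b10-2f7c7ad0f564',    # sdUUID (placeholder)
        'a48f94e6-9dbb-46c0-b43d-8b3f0e2e3f0f',    # spUUID (placeholder)
        ['3600144f09dbd05000000517e730b0005'])     # devlist (placeholder)
    if res['status']['code']:
        print(res['status']['message'])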
diff --git a/vdsm/API.py b/vdsm/API.py
index 94b39b6..af22e50 100644
--- a/vdsm/API.py
+++ b/vdsm/API.py
@@ -961,6 +961,9 @@
         return self._irs.extendStorageDomain(self._UUID, spUUID, devlist,
                                              force)
 
+    def reduce(self, spUUID, devlist):
+        return self._irs.reduceStorageDomain(self._UUID, spUUID, devlist)
+
     def format(self, autoDetach):
         return self._irs.formatStorageDomain(self._UUID, autoDetach)
diff --git a/vdsm/BindingXMLRPC.py b/vdsm/BindingXMLRPC.py
index 76251f5..4def574 100644
--- a/vdsm/BindingXMLRPC.py
+++ b/vdsm/BindingXMLRPC.py
@@ -546,6 +546,10 @@
         domain = API.StorageDomain(sdUUID)
         return domain.extend(spUUID, devlist, force)
 
+    def domainReduce(self, sdUUID, spUUID, devlist, options=None):
+        domain = API.StorageDomain(sdUUID)
+        return domain.reduce(spUUID, devlist)
+
     def domainFormat(self, sdUUID, autoDetach=False, options=None):
         domain = API.StorageDomain(sdUUID)
@@ -949,6 +953,7 @@
             (self.domainDetach, 'detachStorageDomain'),
             (self.domainDetachForced, 'forcedDetachStorageDomain'),
             (self.domainExtend, 'extendStorageDomain'),
+            (self.domainReduce, 'reduceStorageDomain'),
             (self.domainFormat, 'formatStorageDomain'),
             (self.domainGetFileStats, 'getFileStats'),
             (self.domainGetImages, 'getImagesList'),
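
The call path mirrors extendStorageDomain: the XML-RPC name is bound to
domainReduce, which wraps the UUID in an API.StorageDomain and delegates
to the IRS backend. A sketch of the in-process equivalent (placeholder
UUIDs, and an initialized IRS backend is assumed):

    import API

    domain = API.StorageDomain('5b7bf59e-bd56-4e55-9b10-2f7c7ad0f564')
    # reduce() simply forwards to
    # self._irs.reduceStorageDomain(self._UUID, spUUID, devlist).
    res = domain.reduce('a48f94e6-9dbb-46c0-b43d-8b3f0e2e3f0f',
                        ['3600144f09dbd05000000517e730b0005'])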
diff --git a/vdsm/storage/blockSD.py b/vdsm/storage/blockSD.py
index f807d3e..c31bb97 100644
--- a/vdsm/storage/blockSD.py
+++ b/vdsm/storage/blockSD.py
@@ -734,6 +734,10 @@
         else:
             return self.getFreeMetadataSlot(slotSize)
 
+    def reduce(self, devlist):
+        with self._extendlock:
+            lvm.reduceVG(self.sdUUID, devlist)
+
     def _getOccupiedMetadataSlots(self):
         stripPrefix = lambda s, pfx: s[len(pfx):]
         occupiedSlots = []
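
Note that reduce() takes the same _extendlock used by the extend path, so
concurrent extend/reduce calls on one domain serialize their VG changes. A
hedged unit-test sketch with a fake lvm module, so no real VG is touched
(the scaffolding below is an assumption, not part of the patch):

    import threading

    class FakeLvm(object):
        def __init__(self):
            self.calls = []

        def reduceVG(self, vgName, devices):
            self.calls.append((vgName, devices))

    class FakeBlockSD(object):
        # Just the pieces of BlockStorageDomain that reduce() relies on.
        def __init__(self, sdUUID, lvm):
            self.sdUUID = sdUUID
            self._extendlock = threading.Lock()
            self.lvm = lvm

        def reduce(self, devlist):
            with self._extendlock:
                self.lvm.reduceVG(self.sdUUID, devlist)

    lvm = FakeLvm()
    sd = FakeBlockSD('5b7bf59e-bd56-4e55-9b10-2f7c7ad0f564', lvm)
    sd.reduce(['/dev/mapper/3600144f09dbd05000000517e730b0005'])
    assert lvm.calls == [('5b7bf59e-bd56-4e55-9b10-2f7c7ad0f564',
                          ['/dev/mapper/3600144f09dbd05000000517e730b0005'])]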
diff --git a/vdsm/storage/hsm.py b/vdsm/storage/hsm.py
index 0ebe129..93ec28c 100644
--- a/vdsm/storage/hsm.py
+++ b/vdsm/storage/hsm.py
@@ -753,6 +753,14 @@
         pool.extendSD(sdUUID, dmDevs, force)
 
     @public
+    def reduceStorageDomain(self, sdUUID, spUUID, guids):
+        vars.task.getSharedLock(STORAGE, sdUUID)
+        pool = self.getPool(spUUID)
+        dmDevs = tuple(os.path.join(devicemapper.DMPATH_PREFIX, guid)
+                       for guid in guids)
+        pool.reduceSD(sdUUID, dmDevs)
+
+    @public
     def forcedDetachStorageDomain(self, sdUUID, spUUID, options=None):
         """Forced detach a storage domain from a storage pool.
         This removes the storage domain entry in the storage pool meta-data
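
As in extendStorageDomain, the HSM verb receives bare multipath GUIDs and
maps them to device-mapper paths before calling into the pool. A small
illustration of that mapping, assuming DMPATH_PREFIX is '/dev/mapper/' as
in vdsm's devicemapper module (the GUID is a placeholder):

    import os

    DMPATH_PREFIX = '/dev/mapper/'  # assumed devicemapper.DMPATH_PREFIX
    guids = ['3600144f09dbd05000000517e730b0005']
    dmDevs = tuple(os.path.join(DMPATH_PREFIX, guid) for guid in guids)
    assert dmDevs == ('/dev/mapper/3600144f09dbd05000000517e730b0005',)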
diff --git a/vdsm/storage/lvm.py b/vdsm/storage/lvm.py
index d36c505..4b6015e 100644
--- a/vdsm/storage/lvm.py
+++ b/vdsm/storage/lvm.py
@@ -49,7 +49,7 @@
 LVM_DEFAULT_TTL = 100
 
 PV_FIELDS = ("uuid,name,size,vg_name,vg_uuid,pe_start,pe_count,"
-             "pe_alloc_count,mda_count,dev_size")
+             "pe_alloc_count,mda_count,dev_size,pv_mda_used_count")
 VG_FIELDS = ("uuid,name,attr,size,free,extent_size,extent_count,free_count,"
              "tags,vg_mda_size,vg_mda_free,lv_count,pv_count,pv_name")
 LV_FIELDS = "uuid,name,vg_name,attr,size,seg_start_pe,devices,tags"
@@ -966,6 +966,28 @@
         raise se.VolumeGroupExtendError(vgName, pvs)
 
 
+def reduceVG(vgName, devices):
+    pvs = _normalizeargs(devices)
+    vgpvs = _lvminfo._getVGDevs((vgName,))
+
+    for pv in pvs:
+        if pv not in vgpvs:
+            raise se.BlockDeviceActionError(
+                'Physical device %s not in vg %s' % (pv, vgName))
+        if int(getPV(pv).pv_mda_used_count) != 0:
+            raise se.BlockDeviceActionError(
+                'Physical device %s contains the lvm metadata' % pv)
+
+    cmd = ["vgreduce", vgName] + pvs
+    rc, out, err = _lvminfo.cmd(cmd, vgpvs)
+    if rc == 0:
+        _lvminfo._invalidatepvs(vgpvs)
+        _lvminfo._invalidatevgs(vgName)
+        log.debug("Cache after reducing vg %s", _lvminfo._vgs)
+    else:
+        raise se.VolumeGroupReduceError(vgName, pvs)
+
+
 def chkVG(vgName):
     cmd = ["vgck", vgName]
     rc, out, err = _lvminfo.cmd(cmd, _lvminfo._getVGDevs((vgName, )))
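
The two guards are the core of reduceVG: a PV can leave the VG only if it
actually belongs to it and none of its metadata areas are in use, which is
why pv_mda_used_count was added to PV_FIELDS above; removing a PV that
holds the active LVM metadata would corrupt the VG. A standalone sketch of
that predicate, with a hypothetical record standing in for the result of
lvm.getPV():

    from collections import namedtuple

    # Hypothetical stand-in for the PV record returned by getPV().
    PV = namedtuple('PV', ['name', 'pv_mda_used_count'])

    def may_reduce(pv, vgpvs):
        # Mirrors reduceVG's checks: VG membership first, then no in-use
        # metadata areas on the device being removed.
        return pv.name in vgpvs and int(pv.pv_mda_used_count) == 0

    vgpvs = ('/dev/mapper/guid1', '/dev/mapper/guid2')  # placeholders
    assert may_reduce(PV('/dev/mapper/guid2', '0'), vgpvs)
    assert not may_reduce(PV('/dev/mapper/guid1', '1'), vgpvs)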
diff --git a/vdsm/storage/sp.py b/vdsm/storage/sp.py
index 338232f..f4d4987 100644
--- a/vdsm/storage/sp.py
+++ b/vdsm/storage/sp.py
@@ -1862,6 +1862,10 @@
         self.validatePoolSD(sdUUID)
         sdCache.produce(sdUUID).extend(devlist, force)
 
+    def reduceSD(self, sdUUID, devlist):
+        self.validatePoolSD(sdUUID)
+        sdCache.produce(sdUUID).reduce(devlist)
+
     def setSDDescription(self, sd, description):
         self.validatePoolSD(sd.sdUUID)
         sd.setDescription(descr=description)
diff --git a/vdsm/storage/storage_exception.py b/vdsm/storage/storage_exception.py
index b4f3b66..226508a 100644
--- a/vdsm/storage/storage_exception.py
+++ b/vdsm/storage/storage_exception.py
@@ -1504,6 +1504,13 @@
     issue and how to resolve it"""
 
 
+class VolumeGroupReduceError(StorageException):
+    def __init__(self, vgname, devname):
+        self.value = "vgname=%s, devname=%s" % (vgname, devname)
+    code = 503
+    message = "Cannot reduce Volume Group"
+
+
 #################################################
 # SPM/HSM Exceptions
 #################################################
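
Like its siblings, the new exception carries the code/message pair that
vdsm folds into the status dict checked by vdsClient above. A
self-contained sketch (the StorageException stub here is a simplified
stand-in for vdsm's base class):

    class StorageException(Exception):
        # Simplified stand-in for vdsm's base storage exception.
        code = 200
        message = "General storage exception"

    class VolumeGroupReduceError(StorageException):
        def __init__(self, vgname, devname):
            self.value = "vgname=%s, devname=%s" % (vgname, devname)
        code = 503
        message = "Cannot reduce Volume Group"

    try:
        raise VolumeGroupReduceError('vg-placeholder',
                                     ['/dev/mapper/guid1'])
    except StorageException as e:
        # Roughly what clients see in status['code'] / status['message'].
        print('%s: %s (%s)' % (e.code, e.message, e.value))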
diff --git a/vdsm_api/vdsmapi-schema.json b/vdsm_api/vdsmapi-schema.json
index 31bd869..61fc66c 100644
--- a/vdsm_api/vdsmapi-schema.json
+++ b/vdsm_api/vdsmapi-schema.json
@@ -4169,6 +4169,24 @@
           'devlist': ['str'], '*force': 'bool'}}
 
 ##
+# @StorageDomain.reduce:
+#
+# Reduce a block-based Storage Domain, freeing block devices that are not
+# in use (i.e. they hold no LVM metadata and no LVs).
+#
+# @storagedomainID: The UUID of the Storage Domain
+#
+# @storagepoolID: The UUID of the Storage Pool
+#
+# @devlist: An array of block device names to remove from the domain
+#
+# Since: 4.14.0
+##
+{'command': {'class': 'StorageDomain', 'name': 'reduce'},
+ 'data': {'storagedomainID': 'UUID', 'storagepoolID': 'UUID',
+          'devlist': ['str']}}
+
+##
 # @StorageDomain.format:
 #
 # Format a storage domain and erase all of its data.
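
For the schema-driven bindings this maps to a 'StorageDomain.reduce'
method whose params carry the three fields above. A hedged sketch of the
request shape such a call could produce (transport and id handling
elided; UUIDs and GUIDs are placeholders):

    request = {
        'jsonrpc': '2.0',
        'id': 1,
        'method': 'StorageDomain.reduce',
        'params': {
            'storagedomainID': '5b7bf59e-bd56-4e55-9b10-2f7c7ad0f564',
            'storagepoolID': 'a48f94e6-9dbb-46c0-b43d-8b3f0e2e3f0f',
            'devlist': ['3600144f09dbd05000000517e730b0005'],
        },
    }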
--
To view, visit http://gerrit.ovirt.org/26574
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I467fc12d3787929b9c0e35f8806402f72f493368
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Federico Simoncelli <fsimonce(a)redhat.com>