Change in vdsm[master]: Avoid redundant volume produces.
by ewarszaw@redhat.com
Eduardo has uploaded a new change for review.
Change subject: Avoid redundant volume produces.
......................................................................
Avoid redundant volume produces.
Add sd.getVolumePath(), which returns the volume path without producing the volume.
Deprecate hsm.getVolumePath() and hsm.prepareVolume().
When removed, remove API.prepare(), BindingXMLRPC.volumePrepare(),
API.getPath, BindingXMLRPC.volumeGetPath(), etc.
Change-Id: I3ad53a7e8a66d7f9bdd62048f2bf1f722a490c5c
Signed-off-by: Eduardo <ewarszaw(a)redhat.com>
---
M vdsm/storage/fileSD.py
M vdsm/storage/hsm.py
M vdsm/storage/sd.py
3 files changed, 11 insertions(+), 6 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/91/17991/1
diff --git a/vdsm/storage/fileSD.py b/vdsm/storage/fileSD.py
index 9d1493d..8cbea23 100644
--- a/vdsm/storage/fileSD.py
+++ b/vdsm/storage/fileSD.py
@@ -302,8 +302,7 @@
Return the volume lease (leasePath, leaseOffset)
"""
if self.hasVolumeLeases():
- vol = self.produceVolume(imgUUID, volUUID)
- volumePath = vol.getVolumePath()
+ volumePath = self.getVolumePath(imgUUID, volUUID)
leasePath = volumePath + fileVolume.LEASE_FILEEXT
return leasePath, fileVolume.LEASE_FILEOFFSET
return None, None
@@ -426,8 +425,9 @@
# NFS volumes. In theory it is necessary to fix the permission
# of the leaf only but to not introduce an additional requirement
# (ordered volUUIDs) we fix them all.
- for vol in [self.produceVolume(imgUUID, x) for x in volUUIDs]:
- self.oop.fileUtils.copyUserModeToGroup(vol.getVolumePath())
+ for volUUID in volUUIDs:
+ volPath = self.getVolumePath(imgUUID, volUUID)
+ self.oop.fileUtils.copyUserModeToGroup(volPath)
@classmethod
def format(cls, sdUUID):
diff --git a/vdsm/storage/hsm.py b/vdsm/storage/hsm.py
index c754ee8..3545677 100644
--- a/vdsm/storage/hsm.py
+++ b/vdsm/storage/hsm.py
@@ -3076,6 +3076,7 @@
volUUID=volUUID).getInfo()
return dict(info=info)
+ @deprecated
@public
def getVolumePath(self, sdUUID, spUUID, imgUUID, volUUID, options=None):
"""
@@ -3100,8 +3101,7 @@
"""
vars.task.getSharedLock(STORAGE, sdUUID)
path = sdCache.produce(
- sdUUID=sdUUID).produceVolume(imgUUID=imgUUID,
- volUUID=volUUID).getVolumePath()
+ sdUUID=sdUUID).getVolumePath(imgUUID, volUUID)
return dict(path=path)
@public
@@ -3127,6 +3127,7 @@
if fails:
self.log.error("Failed to remove the following rules: %s", fails)
+ @deprecated
@public
def prepareVolume(self, sdUUID, spUUID, imgUUID, volUUID, rw=True,
options=None):
diff --git a/vdsm/storage/sd.py b/vdsm/storage/sd.py
index 36c4877..dde7832 100644
--- a/vdsm/storage/sd.py
+++ b/vdsm/storage/sd.py
@@ -640,6 +640,10 @@
# If it has a repo we don't have multiple domains. Assume single pool
return os.path.join(self.storage_repository, self.getPools()[0])
+ def getVolumePath(self, imgUUID, volUUID):
+ return os.path.join(self.mountpoint, self.sdUUID, 'images', imgUUID,
+ volUUID)
+
def getIsoDomainImagesDir(self):
"""
Get 'images' directory from Iso domain
--
To view, visit http://gerrit.ovirt.org/17991
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I3ad53a7e8a66d7f9bdd62048f2bf1f722a490c5c
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Eduardo <ewarszaw(a)redhat.com>
9 years, 9 months
Change in vdsm[master]: format storage domain using locking type 1
by ykaplan@redhat.com
Yeela Kaplan has uploaded a new change for review.
Change subject: format storage domain using locking type 1
......................................................................
format storage domain using locking type 1
Change-Id: Ifdaf54cb8d27d0e609943d38efb07a0a78b84394
Signed-off-by: Yeela Kaplan <ykaplan(a)redhat.com>
---
M vdsm/storage/blockSD.py
M vdsm/storage/hsm.py
M vdsm/storage/lvm.py
3 files changed, 19 insertions(+), 4 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/14/26014/1
diff --git a/vdsm/storage/blockSD.py b/vdsm/storage/blockSD.py
index d9f398e..9d6ec43 100644
--- a/vdsm/storage/blockSD.py
+++ b/vdsm/storage/blockSD.py
@@ -906,7 +906,7 @@
for lv in lvs:
#Fix me: Should raise and get resource lock.
try:
- lvm.removeLVs(sdUUID, lv.name)
+ lvm.clusterSafeRemoveLVs(sdUUID, lv.name)
except se.CannotRemoveLogicalVolume as e:
cls.log.warning("Remove logical volume failed %s/%s %s",
sdUUID, lv.name, str(e))
diff --git a/vdsm/storage/hsm.py b/vdsm/storage/hsm.py
index e6d994f..7ea6b84 100644
--- a/vdsm/storage/hsm.py
+++ b/vdsm/storage/hsm.py
@@ -40,7 +40,7 @@
from vdsm.config import config
import sp
-from spbackends import MAX_POOL_DESCRIPTION_SIZE, MAX_DOMAINS
+from spbackends import MAX_POOL_DESCRIPTION_SIZE
from spbackends import StoragePoolDiskBackend
from spbackends import StoragePoolMemoryBackend
import domainMonitor
diff --git a/vdsm/storage/lvm.py b/vdsm/storage/lvm.py
index 4df2e6f..2cf5091 100644
--- a/vdsm/storage/lvm.py
+++ b/vdsm/storage/lvm.py
@@ -1114,7 +1114,7 @@
_completeCreateLV(rc, vgName, lvName, activate)
-def removeLVs(vgName, lvNames):
+def _constructRemoveLVs(vgName, lvNames):
lvNames = _normalizeargs(lvNames)
# Assert that the LVs are inactive before remove.
for lvName in lvNames:
@@ -1132,7 +1132,10 @@
cmd.extend(LVM_NOBACKUP)
for lvName in lvNames:
cmd.append("%s/%s" % (vgName, lvName))
- rc, out, err = _lvminfo.cmd(cmd, _lvminfo._getVGDevs((vgName, )))
+ return cmd
+
+
+def _completeRemoveLVs(rc, vgName, lvNames):
if rc == 0:
for lvName in lvNames:
# Remove the LV from the cache
@@ -1145,6 +1148,18 @@
raise se.CannotRemoveLogicalVolume(vgName, str(lvNames))
+def clusterSafeRemoveLVs(vgName, lvNames):
+ cmd = _constructRemoveLVs(vgName, lvNames)
+ rc, out, err = _lvminfo.cmd(cmd, _lvminfo._getVGDevs((vgName, )), rw=True)
+ _completeRemoveLVs(rc, vgName, lvNames)
+
+
+def removeLVs(vgName, lvNames):
+ cmd = _constructRemoveLVs(vgName, lvNames)
+ rc, out, err = _lvminfo.cmd(cmd, _lvminfo._getVGDevs((vgName, )))
+ _completeRemoveLVs(rc, vgName, lvNames)
+
+
def _resizeLV(op, vgName, lvName, size):
"""
Size units: MB (1024 ** 2 = 2 ** 20)B.
--
To view, visit http://gerrit.ovirt.org/26014
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ifdaf54cb8d27d0e609943d38efb07a0a78b84394
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Yeela Kaplan <ykaplan(a)redhat.com>
9 years, 9 months
Change in vdsm[master]: [WIP] destroy storage pool using command type 1
by ykaplan@redhat.com
Yeela Kaplan has uploaded a new change for review.
Change subject: [WIP] destroy storage pool using command type 1
......................................................................
[WIP] destroy storage pool using command type 1
Change-Id: I67cda9abd0bbc01d7d0642d5d3327f8687d7f728
Signed-off-by: Yeela Kaplan <ykaplan(a)redhat.com>
---
M vdsm/storage/blockSD.py
M vdsm/storage/sd.py
M vdsm/storage/sp.py
3 files changed, 29 insertions(+), 6 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/98/24398/1
diff --git a/vdsm/storage/blockSD.py b/vdsm/storage/blockSD.py
index 799ee01..0bbe5dd 100644
--- a/vdsm/storage/blockSD.py
+++ b/vdsm/storage/blockSD.py
@@ -559,9 +559,9 @@
raise se.VolumesZeroingError(path)
if version in VERS_METADATA_LV:
- md = LvBasedSDMetadata(vgName, sd.METADATA)
+ LvBasedSDMetadata(vgName, sd.METADATA)
elif version in VERS_METADATA_TAG:
- md = TagBasedSDMetadata(vgName)
+ TagBasedSDMetadata(vgName)
logBlkSize, phyBlkSize = lvm.getVGBlockSizes(vgName)
@@ -1327,10 +1327,10 @@
vgName = vg.name
toAdd = encodeVgTags(leaseParams)
toAdd += encodeVgTags({sd.DMDK_POOLS: spUUID,
- sd.DMDK_ROLE: sd.MASTER_DOMAIN})
+ sd.DMDK_ROLE: sd.MASTER_DOMAIN})
toDel = encodeVgTags({sd.DMDK_ROLE: sd.REGULAR_DOMAIN,
- sd.DMDK_POOLS: spUUID,
- sd.DMDK_POOLS: ''})
+ sd.DMDK_POOLS: spUUID,
+ sd.DMDK_POOLS: ''})
lvm.changeVGTags(vgName, delTags=toDel, addTags=toAdd, safe=False)
def refreshDirTree(self):
@@ -1357,6 +1357,26 @@
finally:
self._extendlock.release()
+ def detachMaster(self, spUUID):
+ self.invalidateMetadata()
+ pools = self.getPools()
+ try:
+ pools.remove(spUUID)
+ except ValueError:
+ self.log.error(
+ "Can't remove pool %s from domain %s pool list %s, "
+ "it does not exist",
+ spUUID, self.sdUUID, str(pools))
+ return
+ vgUUID = self.getInfo()['vguuid']
+ vg = lvm.getVGbyUUID(vgUUID)
+ vgName = vg.name
+ toAdd = encodeVgTags({sd.DMDK_POOLS: '',
+ sd.DMDK_ROLE: sd.REGULAR_DOMAIN})
+ toDel = encodeVgTags({sd.DMDK_POOLS: spUUID,
+ sd.DMDK_ROLE: sd.MASTER_DOMAIN})
+ lvm.changeVGTags(vgName, delTags=toDel, addTags=toAdd, safe=False)
+
def refresh(self):
self.refreshDirTree()
lvm.invalidateVG(self.sdUUID)
diff --git a/vdsm/storage/sd.py b/vdsm/storage/sd.py
index 23ca112..6849545 100644
--- a/vdsm/storage/sd.py
+++ b/vdsm/storage/sd.py
@@ -524,6 +524,9 @@
# Last thing to do is to remove pool from domain
# do any required cleanup
+ def detachMaster(self, spUUID):
+ self.detach(spUUID)
+
# I personally don't think there is a reason to pack these
# but I already changed too much.
def changeLeaseParams(self, leaseParamPack):
diff --git a/vdsm/storage/sp.py b/vdsm/storage/sp.py
index d228a9d..6ec941d 100644
--- a/vdsm/storage/sp.py
+++ b/vdsm/storage/sp.py
@@ -963,7 +963,7 @@
# Forced detach master domain
self.forcedDetachSD(self.masterDomain.sdUUID)
- self.masterDomain.detach(self.spUUID)
+ self.masterDomain.detachMaster(self.spUUID)
@unsecured
def _convertDomain(self, domain, targetFormat=None):
--
To view, visit http://gerrit.ovirt.org/24398
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I67cda9abd0bbc01d7d0642d5d3327f8687d7f728
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Yeela Kaplan <ykaplan(a)redhat.com>
9 years, 9 months
Change in vdsm[master]: [WIP] Create storage pool using command type 1
by ykaplan@redhat.com
Yeela Kaplan has uploaded a new change for review.
Change subject: [WIP] Create storage pool using command type 1
......................................................................
[WIP] Create storage pool using command type 1
Change-Id: Ia64f6dd2df38d2968f03ce66094f3ba7b4343503
Signed-off-by: Yeela Kaplan <ykaplan(a)redhat.com>
---
M vdsm/storage/blockSD.py
M vdsm/storage/hsm.py
M vdsm/storage/lvm.py
M vdsm/storage/sd.py
M vdsm/storage/sp.py
5 files changed, 71 insertions(+), 74 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/47/23647/1
diff --git a/vdsm/storage/blockSD.py b/vdsm/storage/blockSD.py
index 7980c80..bb7f365 100644
--- a/vdsm/storage/blockSD.py
+++ b/vdsm/storage/blockSD.py
@@ -92,6 +92,12 @@
VERS_METADATA_TAG = (2, 3)
+def encodeVgTags(tagsDict):
+ return [VGTagMetadataRW.METADATA_TAG_PREFIX +
+ lvmTagEncode("%s=%s" % (k, v))
+ for k, v in tagsDict.items()]
+
+
def encodePVInfo(pvInfo):
return (
"pv:%s," % pvInfo["guid"] +
@@ -130,6 +136,13 @@
def lvmTagDecode(s):
return LVM_ENC_ESCAPE.sub(lambda c: unichr(int(c.groups()[0])), s)
+
+
+def encodeVgTags(tagsDict):
+ tags = [VGTagMetadataRW.METADATA_TAG_PREFIX +
+ lvmTagEncode("%s=%s" % (k, v))
+ for k, v in tagsDict.items()]
+ return tuple(tags)
def _tellEnd(devPath):
@@ -523,7 +536,7 @@
# least SDMETADATA/METASIZE units, we know we can use the first
# SDMETADATA bytes of the metadata volume for the SD metadata.
# pass metadata's dev to ensure it is the first mapping
- mapping = cls.getMetaDataMapping(vgName)
+ #mapping = cls.getMetaDataMapping(vgName)
# Create the rest of the BlockSD internal volumes
lvm.createLV(vgName, sd.LEASES, sd.LEASES_SIZE, safe=False)
@@ -558,6 +571,7 @@
logBlkSize, phyBlkSize = lvm.getVGBlockSizes(vgName)
+ mapping = cls.getMetaDataMapping(vgName)
# create domain metadata
# FIXME : This is 99% like the metadata in file SD
# Do we really need to keep the VGUUID?
@@ -565,11 +579,11 @@
initialMetadata = {
sd.DMDK_VERSION: version,
sd.DMDK_SDUUID: sdUUID,
- sd.DMDK_TYPE: storageType,
- sd.DMDK_CLASS: domClass,
+ sd.DMDK_TYPE: sd.storageType(storageType),
+ sd.DMDK_CLASS: sd.class2name(domClass),
sd.DMDK_DESCRIPTION: domainName,
sd.DMDK_ROLE: sd.REGULAR_DOMAIN,
- sd.DMDK_POOLS: [],
+ sd.DMDK_POOLS: '',
sd.DMDK_LOCK_POLICY: '',
sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC: sd.DEFAULT_LEASE_PARAMS[
sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC],
@@ -585,8 +599,8 @@
}
initialMetadata.update(mapping)
-
- md.update(initialMetadata)
+ toAdd = encodeVgTags(initialMetadata)
+ lvm.changeVGTags(vgName, delTags=(), addTags=toAdd, safe=False)
# Mark VG with Storage Domain Tag
try:
@@ -1302,6 +1316,22 @@
# It is time to deactivate the master LV now
lvm.deactivateLVs(self.sdUUID, MASTERLV)
+ def initMasterParams(self, poolMD, params):
+ vgUUID = self.getInfo()['vguuid']
+ vg = lvm.getVGbyUUID(vgUUID)
+ vgName = vg.name
+ toAdd = encodeVgTags(params)
+ lvm.changeVGTags(vgName, addTags=toAdd, safe=False)
+
+ def setMasterDomainParams(self, spUUID, leaseParams):
+ vgUUID = self.getInfo()['vguuid']
+ vg = lvm.getVGbyUUID(vgUUID)
+ vgName = vg.name
+ toAdd = encodeVgTags(leaseParams)
+ toAdd += encodeVgTags({sd.DMDK_POOLS: [spUUID],
+ sd.DMDK_ROLE: sd.MASTER_DOMAIN})
+ lvm.changeVGTags(vgName, delTags=(), addTags=toAdd, safe=False)
+
def refreshDirTree(self):
# create domain images folder
imagesPath = os.path.join(self.domaindir, sd.DOMAIN_IMAGES)
diff --git a/vdsm/storage/hsm.py b/vdsm/storage/hsm.py
index 5c73dd9..ff27d53 100644
--- a/vdsm/storage/hsm.py
+++ b/vdsm/storage/hsm.py
@@ -942,35 +942,15 @@
if masterDom not in domList:
raise se.InvalidParameterException("masterDom", str(masterDom))
+ if len(domList) > 1:
+ raise NotImplementedError("Create storage pool "
+ "only with master domain")
+
if len(poolName) > sp.MAX_POOL_DESCRIPTION_SIZE:
raise se.StoragePoolDescriptionTooLongError()
- msd = sdCache.produce(sdUUID=masterDom)
- msdType = msd.getStorageType()
- msdVersion = msd.getVersion()
- if (msdType in sd.BLOCK_DOMAIN_TYPES and
- msdVersion in blockSD.VERS_METADATA_LV and
- len(domList) > sp.MAX_DOMAINS):
- raise se.TooManyDomainsInStoragePoolError()
-
- for sdUUID in domList:
- try:
- dom = sdCache.produce(sdUUID=sdUUID)
- # TODO: consider removing validate() from here, as the domains
- # are going to be accessed much later, and may loose validity
- # until then.
- dom.validate()
- except:
- raise se.StorageDomainAccessError(sdUUID)
- # If you remove this condition, remove it from
- # StoragePool.attachSD() too.
- if dom.isData() and (dom.getVersion() > msdVersion):
- raise se.MixedSDVersionError(dom.sdUUID, dom.getVersion(),
- msd.sdUUID, msdVersion)
-
vars.task.getExclusiveLock(STORAGE, spUUID)
- for dom in sorted(domList):
- vars.task.getExclusiveLock(STORAGE, dom)
+ vars.task.getExclusiveLock(STORAGE, masterDom)
return sp.StoragePool(spUUID, self.domainMonitor, self.taskMng).create(
poolName, masterDom, domList, masterVersion, leaseParams)
diff --git a/vdsm/storage/lvm.py b/vdsm/storage/lvm.py
index 0f96df6..c1a0b92 100644
--- a/vdsm/storage/lvm.py
+++ b/vdsm/storage/lvm.py
@@ -302,7 +302,7 @@
if rc != 0:
# Filter might be stale
self.invalidateFilter()
- newCmd = self._addExtraCfg(cmd, safe)
+ newCmd = self._addExtraCfg(cmd, tuple(), safe)
# Before blindly trying again make sure
# that the commands are not identical, because
# the devlist is sorted there is no fear
diff --git a/vdsm/storage/sd.py b/vdsm/storage/sd.py
index 7f00533..c968d7b 100644
--- a/vdsm/storage/sd.py
+++ b/vdsm/storage/sd.py
@@ -766,6 +766,15 @@
def isMaster(self):
return self.getMetaParam(DMDK_ROLE).capitalize() == MASTER_DOMAIN
+ @classmethod
+ def initMasterParams(cls, poolMD, params):
+ poolMD.update(params)
+
+ def setMasterDomainParams(self, spUUID, leaseParams):
+ self.changeLeaseParams(leaseParams)
+ self.setMetaParam(DMDK_POOLS, [spUUID])
+ self.changeRole(MASTER_DOMAIN)
+
def initMaster(self, spUUID, leaseParams):
self.invalidateMetadata()
pools = self.getPools()
@@ -774,9 +783,7 @@
raise se.StorageDomainAlreadyAttached(pools[0], self.sdUUID)
with self._metadata.transaction():
- self.changeLeaseParams(leaseParams)
- self.setMetaParam(DMDK_POOLS, [spUUID])
- self.changeRole(MASTER_DOMAIN)
+ self.setMasterDomainParams(spUUID, leaseParams)
def isISO(self):
return self.getMetaParam(DMDK_CLASS) == ISO_DOMAIN
diff --git a/vdsm/storage/sp.py b/vdsm/storage/sp.py
index 50e29ef..0b00264 100644
--- a/vdsm/storage/sp.py
+++ b/vdsm/storage/sp.py
@@ -588,9 +588,8 @@
@unsecured
def create(self, poolName, msdUUID, domList, masterVersion, leaseParams):
"""
- Create new storage pool with single/multiple image data domain.
- The command will create new storage pool meta-data attach each
- storage domain to that storage pool.
+ Create new storage pool with single image data domain.
+ The command will create new storage pool meta-data
At least one data (images) domain must be provided
'poolName' - storage pool name
'msdUUID' - master domain of this pool (one of domList)
@@ -600,27 +599,20 @@
"masterVersion=%s %s", self.spUUID, poolName, msdUUID,
domList, masterVersion, leaseParams)
- if msdUUID not in domList:
- raise se.InvalidParameterException("masterDomain", msdUUID)
+ # Check the master domain before pool creation
+ try:
+ msd = sdCache.produce(msdUUID)
+ msd.validate()
+ except se.StorageException:
+ self.log.error("Unexpected error", exc_info=True)
+ raise se.StorageDomainAccessError(msdUUID)
- # Check the domains before pool creation
- for sdUUID in domList:
- try:
- domain = sdCache.produce(sdUUID)
- domain.validate()
- if sdUUID == msdUUID:
- msd = domain
- except se.StorageException:
- self.log.error("Unexpected error", exc_info=True)
- raise se.StorageDomainAccessError(sdUUID)
-
- # Validate unattached domains
- if not domain.isISO():
- domain.invalidateMetadata()
- spUUIDs = domain.getPools()
- # Non ISO domains have only 1 pool
- if len(spUUIDs) > 0:
- raise se.StorageDomainAlreadyAttached(spUUIDs[0], sdUUID)
+ # Validate unattached domains
+ msd.invalidateMetadata()
+ spUUIDs = msd.getPools()
+ # Non ISO domains have only 1 pool
+ if len(spUUIDs) > 0:
+ raise se.StorageDomainAlreadyAttached(spUUIDs[0], msdUUID)
fileUtils.createdir(self.poolPath)
self._acquireTemporaryClusterLock(msdUUID, leaseParams)
@@ -629,23 +621,10 @@
self._setSafe()
# Mark 'master' domain
# We should do it before actually attaching this domain to the pool
- # During 'master' marking we create pool metadata and each attached
- # domain should register there
+ # During 'master' marking we create pool metadata
self.createMaster(poolName, msd, masterVersion, leaseParams)
self.__rebuild(msdUUID=msdUUID, masterVersion=masterVersion)
- # Attach storage domains to the storage pool
- # Since we are creating the pool then attach is done from the hsm
- # and not the spm therefore we must manually take the master domain
- # lock
- # TBD: create will receive only master domain and further attaches
- # should be done under SPM
- # Master domain was already attached (in createMaster),
- # no need to reattach
- for sdUUID in domList:
- # No need to attach the master
- if sdUUID != msdUUID:
- self.attachSD(sdUUID)
except Exception:
self.log.error("Create pool %s canceled ", poolName, exc_info=True)
try:
@@ -716,13 +695,14 @@
@unsecured
def initParameters(self, poolName, domain, masterVersion):
- self._getPoolMD(domain).update({
+ params = {
PMDK_SPM_ID: SPM_ID_FREE,
PMDK_LVER: LVER_INVALID,
PMDK_MASTER_VER: masterVersion,
PMDK_POOL_DESCRIPTION: poolName,
PMDK_DOMAINS: {domain.sdUUID: sd.DOM_ACTIVE_STATUS},
- })
+ }
+ domain.initMasterParams(self._getPoolMD(domain), params)
@unsecured
def createMaster(self, poolName, domain, masterVersion, leaseParams):
--
To view, visit http://gerrit.ovirt.org/23647
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ia64f6dd2df38d2968f03ce66094f3ba7b4343503
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Yeela Kaplan <ykaplan(a)redhat.com>
9 years, 9 months
Change in vdsm[master]: [WIP] Create storage domain using command type 1
by ykaplan@redhat.com
Yeela Kaplan has uploaded a new change for review.
Change subject: [WIP] Create storage domain using command type 1
......................................................................
[WIP] Create storage domain using command type 1
All bootstrap operations are executed using command type 1.
Change-Id: I127af299086ec5572d29686451d4892c9ff0330d
Signed-off-by: Yeela Kaplan <ykaplan(a)redhat.com>
---
M vdsm/storage/blockSD.py
M vdsm/storage/lvm.py
2 files changed, 15 insertions(+), 14 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/46/23646/1
diff --git a/vdsm/storage/blockSD.py b/vdsm/storage/blockSD.py
index 55bd796..7980c80 100644
--- a/vdsm/storage/blockSD.py
+++ b/vdsm/storage/blockSD.py
@@ -517,7 +517,7 @@
# Create metadata service volume
metasize = cls.metaSize(vgName)
- lvm.createLV(vgName, sd.METADATA, "%s" % (metasize))
+ lvm.createLV(vgName, sd.METADATA, "%s" % (metasize), safe=False)
# Create the mapping right now so the index 0 is guaranteed
# to belong to the metadata volume. Since the metadata is at
# least SDMETADATA/METASIZE units, we know we can use the first
@@ -526,11 +526,11 @@
mapping = cls.getMetaDataMapping(vgName)
# Create the rest of the BlockSD internal volumes
- lvm.createLV(vgName, sd.LEASES, sd.LEASES_SIZE)
- lvm.createLV(vgName, sd.IDS, sd.IDS_SIZE)
- lvm.createLV(vgName, sd.INBOX, sd.INBOX_SIZE)
- lvm.createLV(vgName, sd.OUTBOX, sd.OUTBOX_SIZE)
- lvm.createLV(vgName, MASTERLV, MASTERLV_SIZE)
+ lvm.createLV(vgName, sd.LEASES, sd.LEASES_SIZE, safe=False)
+ lvm.createLV(vgName, sd.IDS, sd.IDS_SIZE, safe=False)
+ lvm.createLV(vgName, sd.INBOX, sd.INBOX_SIZE, safe=False)
+ lvm.createLV(vgName, sd.OUTBOX, sd.OUTBOX_SIZE, safe=False)
+ lvm.createLV(vgName, MASTERLV, MASTERLV_SIZE, safe=False)
# Create VMS file system
_createVMSfs(os.path.join("/dev", vgName, MASTERLV))
@@ -591,7 +591,7 @@
# Mark VG with Storage Domain Tag
try:
lvm.replaceVGTag(vgName, STORAGE_UNREADY_DOMAIN_TAG,
- STORAGE_DOMAIN_TAG)
+ STORAGE_DOMAIN_TAG, safe=False)
except se.StorageException:
raise se.VolumeGroupUninitialized(vgName)
diff --git a/vdsm/storage/lvm.py b/vdsm/storage/lvm.py
index 932d69e..0f96df6 100644
--- a/vdsm/storage/lvm.py
+++ b/vdsm/storage/lvm.py
@@ -257,7 +257,7 @@
return self._extraCfg
- def _addExtraCfg(self, cmd, devices=tuple(), safe):
+ def _addExtraCfg(self, cmd, devices=tuple(), safe=True):
newcmd = [constants.EXT_LVM, cmd[0]]
if devices:
conf = _buildConfig(devices)
@@ -656,6 +656,7 @@
globals()["_current_lvmconf"] = _current_lvmconf.replace("locking_type=4",
"locking_type=1")
log.debug("### _current_lvmconf %s", globals()["_current_lvmconf"])
+
def bootstrap(refreshlvs=()):
"""
@@ -1061,7 +1062,7 @@
def createLV(vgName, lvName, size, activate=True, contiguous=False,
- initialTag=None):
+ initialTag=None, safe=True):
"""
Size units: MB (1024 ** 2 = 2 ** 20)B.
"""
@@ -1078,7 +1079,7 @@
if initialTag is not None:
cmd.extend(("--addtag", initialTag))
cmd.extend(("--name", lvName, vgName))
- rc, out, err = _lvminfo.cmd(cmd, _lvminfo._getVGDevs((vgName, )))
+ rc, out, err = _lvminfo.cmd(cmd, _lvminfo._getVGDevs((vgName, )), safe)
if rc == 0:
_lvminfo._invalidatevgs(vgName)
@@ -1280,7 +1281,7 @@
return os.path.exists(lvPath(vgName, lvName))
-def changeVGTags(vgName, delTags=(), addTags=()):
+def changeVGTags(vgName, delTags=(), addTags=(), safe=True):
delTags = set(delTags)
addTags = set(addTags)
if delTags.intersection(addTags):
@@ -1296,7 +1297,7 @@
cmd.extend(("--addtag", tag))
cmd.append(vgName)
- rc, out, err = _lvminfo.cmd(cmd, _lvminfo._getVGDevs((vgName, )))
+ rc, out, err = _lvminfo.cmd(cmd, _lvminfo._getVGDevs((vgName, )), safe)
_lvminfo._invalidatevgs(vgName)
if rc != 0:
raise se.VolumeGroupReplaceTagError(
@@ -1321,8 +1322,8 @@
raise se.VolumeGroupRemoveTagError(vgName)
-def replaceVGTag(vg, oldTag, newTag):
- changeVGTags(vg, [oldTag], [newTag])
+def replaceVGTag(vg, oldTag, newTag, safe=True):
+ changeVGTags(vg, [oldTag], [newTag], safe)
def addVGTags(vgName, tags):
--
To view, visit http://gerrit.ovirt.org/23646
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I127af299086ec5572d29686451d4892c9ff0330d
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Yeela Kaplan <ykaplan(a)redhat.com>
9 years, 9 months
Change in vdsm[master]: [WIP] Towards a more (block) secure HSM.
by ewarszaw@redhat.com
Eduardo has uploaded a new change for review.
Change subject: [WIP] Towards a more (block) secure HSM.
......................................................................
[WIP] Towards a more (block) secure HSM.
Change-Id: I30df4ee5cdb6b44cf14d8cb155436aac7442a07d
---
M vdsm/storage/hsm.py
M vdsm/storage/lvm.py
M vdsm/storage/sp.py
3 files changed, 25 insertions(+), 5 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/18/2218/1
--
To view, visit http://gerrit.ovirt.org/2218
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I30df4ee5cdb6b44cf14d8cb155436aac7442a07d
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Eduardo <ewarszaw(a)redhat.com>
9 years, 9 months
Change in vdsm[master]: hsm.py: volUUID is not member of HSM class
by lbednar@redhat.com
Lukas Bednar has uploaded a new change for review.
Change subject: hsm.py: volUUID is not member of HSM class
......................................................................
hsm.py: volUUID is not member of HSM class
Change-Id: I5adc1fb40d54b7a887a4aba44c8884c876d5613a
Signed-off-by: Lukas Bednar <lbednar(a)redhat.com>
---
M vdsm/storage/hsm.py
1 file changed, 1 insertion(+), 1 deletion(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/13/24313/1
diff --git a/vdsm/storage/hsm.py b/vdsm/storage/hsm.py
index 9420931..41917f3 100644
--- a/vdsm/storage/hsm.py
+++ b/vdsm/storage/hsm.py
@@ -692,7 +692,7 @@
volFormat = volToExtend.getFormat()
if not volToExtend.isLeaf():
- raise se.VolumeNonWritable(self.volUUID)
+ raise se.VolumeNonWritable(volUUID)
if volFormat != volume.COW_FORMAT:
# This method is used only with COW volumes (see docstring),
--
To view, visit http://gerrit.ovirt.org/24313
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I5adc1fb40d54b7a887a4aba44c8884c876d5613a
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Lukas Bednar <lbednar(a)redhat.com>
9 years, 9 months
Change in vdsm[master]: WIP tests: add VM startup tests
by fromani@redhat.com
Francesco Romani has uploaded a new change for review.
Change subject: WIP tests: add VM startup tests
......................................................................
WIP tests: add VM startup tests
Add more tests addressing the VM startup and XML processing.
Those tests will be used for the planned split/refactoring of XML
processing and device handling.
Change-Id: Ia647d207bd30b6adc55e25e67198470eacd2144e
Signed-off-by: Francesco Romani <fromani(a)redhat.com>
---
M tests/vmTests.py
M tests/vmTestsData.py
2 files changed, 186 insertions(+), 4 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/57/26257/1
diff --git a/tests/vmTests.py b/tests/vmTests.py
index 552a1a7..277b959 100644
--- a/tests/vmTests.py
+++ b/tests/vmTests.py
@@ -27,6 +27,7 @@
from virt import vm
from virt import vmexitreason
+from virt import vmstatus
from vdsm import constants
from vdsm import define
from testrunner import VdsmTestCase as TestCaseBase
@@ -37,12 +38,21 @@
from monkeypatch import MonkeyPatch, MonkeyPatchScope
from vmTestsData import CONF_TO_DOMXML_X86_64
from vmTestsData import CONF_TO_DOMXML_PPC64
+from vmTestsData import VM_PARAMS
-class ConnectionMock:
- def domainEventRegisterAny(self, *arg):
+class ConnectionMock(object):
+ XML = ''
+ def createXML(self, domxml, flags):
+ class FakeDom(object):
+ def __init__(self, xml=''):
+ self.xml = xml
+ def XMLDesc(self, *args):
+ return self.xml if self.xml else self.XML
+ return FakeDom(domxml)
+
+ def domainEventRegisterAny(self, *args):
pass
-
class TestVm(TestCaseBase):
@@ -696,6 +706,23 @@
self.assertBuildCmdLine(CONF_TO_DOMXML_PPC64)
+class FakeIRS(object):
+ def getVolumeSize(self, *args, **kwargs):
+ return {
+ 'status': { 'code': 0 },
+ 'truesize': 0,
+ 'apparentsize': 0
+ }
+
+
+class FakeClientIF(object):
+ def __init__(self):
+ self.irs = FakeIRS()
+
+ def prepareVolumePath(self, *args, **kwargs):
+ return ''
+
+
@contextmanager
def FakeVM(params=None):
with namedTemporaryDir() as tmpDir:
@@ -704,7 +731,7 @@
lambda x: ConnectionMock())]):
vmParams = {'vmId': 'TESTING'}
vmParams.update({} if params is None else params)
- yield vm.Vm(None, vmParams)
+ yield vm.Vm(FakeClientIF(), vmParams)
@expandPermutations
@@ -799,3 +826,10 @@
stats = fake.getStats()
self.assertEqual(stats['exitReason'], exitReason)
self.assertEqual(stats['exitMessage'], msg)
+
+
+class TestVmStartup(TestCaseBase):
+ def testParamsCreate(self):
+ with FakeVM(VM_PARAMS[0]) as vm:
+ vm._startUnderlyingVm()
+ self.assertEqual(vm.lastStatus, vmstatus.UP)
diff --git a/tests/vmTestsData.py b/tests/vmTestsData.py
index 80af18e..3b3c6d0 100644
--- a/tests/vmTestsData.py
+++ b/tests/vmTestsData.py
@@ -151,3 +151,151 @@
</qemu:commandline>
</domain>
""", )]
+
+
+VM_PARAMS = [{
+ 'acpiEnable': 'true',
+ 'emulatedMachine': 'pc-1.0',
+ 'vmId': '2f50cbb4-f80f-4761-bb0d-1d351f497af7',
+ 'memGuaranteedSize': 1024,
+ 'spiceSslCipherSuite': 'DEFAULT',
+ 'timeOffset': '0',
+ 'displayPort': '-1',
+ 'displaySecurePort': '-1',
+ 'cpuType': 'Conroe',
+ 'smp': '1',
+ 'custom': {
+ 'device_7a09b642-7018-43b8-9938-33432b99cdc1device_dce622cc-29fa-431d-b10f-677903ffebbadevice_156942ac-5c99-496b-8c6a-3ffab96c3164device_e2e8af70-760d-4d07-b27e-cbbcb638affb':
+ 'VmDevice {vmId=2f50cbb4-f80f-4761-bb0d-1d351f497af7, deviceId=e2e8af70-760d-4d07-b27e-cbbcb638affb, device=ide, type=CONTROLLER, bootOrder=0, specParams={}, address={bus=0x00, domain=0x0000, type=pci, slot=0x01, function=0x1}, managed=false, plugged=true, readOnly=false, deviceAlias=ide0, customProperties={}, snapshotId=null}',
+ 'device_7a09b642-7018-43b8-9938-33432b99cdc1':
+ 'VmDevice {vmId=2f50cbb4-f80f-4761-bb0d-1d351f497af7, deviceId=7a09b642-7018-43b8-9938-33432b99cdc1, device=unix, type=CHANNEL, bootOrder=0, specParams={}, address={port=1, bus=0, controller=0, type=virtio-serial}, managed=false, plugged=true, readOnly=false, deviceAlias=channel0, customProperties={}, snapshotId=null}',
+ 'device_7a09b642-7018-43b8-9938-33432b99cdc1device_dce622cc-29fa-431d-b10f-677903ffebbadevice_156942ac-5c99-496b-8c6a-3ffab96c3164device_e2e8af70-760d-4d07-b27e-cbbcb638affbdevice_87060fa4-d4d9-4140-9915-73babd76fc12':
+ 'VmDevice {vmId=2f50cbb4-f80f-4761-bb0d-1d351f497af7, deviceId=87060fa4-d4d9-4140-9915-73babd76fc12, device=virtio-serial, type=CONTROLLER, bootOrder=0, specParams={}, address={bus=0x00, domain=0x0000, type=pci, slot=0x04, function=0x0}, managed=false, plugged=true, readOnly=false, deviceAlias=virtio-serial0, customProperties={}, snapshotId=null}',
+ 'device_7a09b642-7018-43b8-9938-33432b99cdc1device_dce622cc-29fa-431d-b10f-677903ffebbadevice_156942ac-5c99-496b-8c6a-3ffab96c3164':
+ 'VmDevice {vmId=2f50cbb4-f80f-4761-bb0d-1d351f497af7, deviceId=156942ac-5c99-496b-8c6a-3ffab96c3164, device=spicevmc, type=CHANNEL, bootOrder=0, specParams={}, address={port=3, bus=0, controller=0, type=virtio-serial}, managed=false, plugged=true, readOnly=false, deviceAlias=channel2, customProperties={}, snapshotId=null}',
+ 'device_7a09b642-7018-43b8-9938-33432b99cdc1device_dce622cc-29fa-431d-b10f-677903ffebba':
+ 'VmDevice {vmId=2f50cbb4-f80f-4761-bb0d-1d351f497af7, deviceId=dce622cc-29fa-431d-b10f-677903ffebba, device=unix, type=CHANNEL, bootOrder=0, specParams={}, address={port=2, bus=0, controller=0, type=virtio-serial}, managed=false, plugged=true, readOnly=false, deviceAlias=channel1, customProperties={}, snapshotId=null}'
+ },
+ 'vmType': 'kvm',
+ 'memSize': 10240,
+ 'smpCoresPerSocket': '1',
+ 'vmName': 'satelit',
+ 'nice': '0',
+ 'smartcardEnable': 'false',
+ 'keyboardLayout': 'en-us',
+ 'kvmEnable': 'true',
+ 'pitReinjection': 'false',
+ 'transparentHugePages': 'true',
+ 'devices': [ {
+ 'device': 'qxl',
+ 'specParams': {
+ 'vram': '32768',
+ 'ram': '65536',
+ 'heads': '1'
+ },
+ 'type': 'video',
+ 'deviceId': '21fbf3bc-a4e0-4706-a2f2-af14f3433a2d',
+ 'address': {
+ 'slot': '0x02',
+ 'bus': '0x00',
+ 'domain': '0x0000',
+ 'type': 'pci',
+ 'function': '0x0'
+ }
+ }, {
+ 'index': '2',
+ 'iface': 'ide',
+ 'bootOrder': '2',
+ 'specParams': {
+ 'path': ''
+ },
+ 'readonly': 'true',
+ 'deviceId': 'af3420c5-b179-4761-ac69-85831cee3b49',
+ 'address': {
+ 'bus': '1',
+ 'controller': '0',
+ 'type': 'drive',
+ 'target': '0',
+ 'unit': '0'
+ },
+ 'device': 'cdrom',
+ 'shared': 'false',
+ 'path': '',
+ 'type': 'disk'
+ }, {
+ 'index': 0,
+ 'iface': 'virtio',
+ 'format': 'raw',
+ 'bootOrder': '1',
+ 'poolID': '5849b030-626e-47cb-ad90-3ce782d831b3',
+ 'volumeID': '82231f1f-2c7a-4aee-a596-af209fe5e081',
+ 'imageID': 'faa6b382-eaa7-48d4-80fb-3e5999e1a167',
+ 'specParams': {},
+ 'readonly': 'false',
+ 'domainID': '041e8dbe-f405-4aca-bb78-3bf3cc7dc190',
+ 'optional': 'false',
+ 'deviceId': 'faa6b382-eaa7-48d4-80fb-3e5999e1a167',
+ 'address': {
+ 'slot': '0x05',
+ 'bus': '0x00',
+ 'domain': '0x0000',
+ 'type': 'pci',
+ 'function': '0x0'
+ },
+ 'device': 'disk',
+ 'shared': 'false',
+ 'propagateErrors': 'off',
+ 'type': 'disk'
+ }, {
+ 'nicModel': 'pv',
+ 'macAddr': '00:1a:4a:69:90:dd',
+ 'linkActive': 'true',
+ 'network': 'VPO_IPPROXY',
+ 'filter': 'vdsm-no-mac-spoofing',
+ 'specParams': {
+ 'inbound': {
+ 'average': 25600,
+ 'peak': 26880,
+ 'burst': 256000
+ },
+ 'outbound': {
+ 'average': 25600,
+ 'peak': 26880,
+ 'burst': 256000
+ }
+ },
+ 'deviceId': '10591bf3-0131-480a-8f89-5b8db7d49cd8',
+ 'address': {
+ 'slot': '0x07',
+ 'bus': '0x00',
+ 'domain': '0x0000',
+ 'type': 'pci',
+ 'function': '0x0'
+ },
+ 'device': 'bridge',
+ 'type': 'interface'
+ }, {
+ 'device': 'memballoon',
+ 'specParams': {
+ 'model': 'virtio'
+ },
+ 'type': 'balloon',
+ 'deviceId': 'b9561cf2-37f0-4e64-bc7b-8c1921c06f9b'
+ }, {
+ 'index': '0',
+ 'specParams': {},
+ 'deviceId': 'd02fc85d-4862-4e0f-a7c5-eadce3509935',
+ 'address': {
+ 'slot': '0x03',
+ 'bus': '0x00',
+ 'domain': '0x0000',
+ 'type': 'pci',
+ 'function': '0x0'
+ },
+ 'device': 'scsi',
+ 'model': 'virtio-scsi',
+ 'type': 'controller'
+ }],
+ 'spiceSecureChannels': 'smain,sinputs,scursor,splayback,srecord,sdisplay,susbredir,ssmartcard',
+ 'display': 'qxl'
+},]
--
To view, visit http://gerrit.ovirt.org/26257
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ia647d207bd30b6adc55e25e67198470eacd2144e
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Francesco Romani <fromani(a)redhat.com>
9 years, 9 months
Change in vdsm[master]: volumeTests: add BlockDomainMetadataSlotTest
by Federico Simoncelli
Federico Simoncelli has uploaded a new change for review.
Change subject: volumeTests: add BlockDomainMetadataSlotTest
......................................................................
volumeTests: add BlockDomainMetadataSlotTest
A new test has been added to check the metadata slot selection.
Change-Id: I0ff018625443ce7cf75d3edf11644544e7f23dde
Signed-off-by: Federico Simoncelli <fsimonce(a)redhat.com>
---
M tests/volumeTests.py
1 file changed, 37 insertions(+), 5 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/66/26266/1
diff --git a/tests/volumeTests.py b/tests/volumeTests.py
index 6e4a3b2..5b6476a 100644
--- a/tests/volumeTests.py
+++ b/tests/volumeTests.py
@@ -24,7 +24,9 @@
from testrunner import VdsmTestCase as TestCaseBase
-from storage import outOfProcess, fileSD
+from storage import blockSD, blockVolume, fileSD, outOfProcess
+
+SDBLKSZ = 512
class FileDomainMockObject(fileSD.FileStorageDomain):
@@ -40,7 +42,6 @@
class FileVolumeGetVSizeTest(TestCaseBase):
VOLSIZE = 1024
- SDBLKSZ = 512
def setUp(self):
self.mountpoint = tempfile.mkdtemp()
@@ -54,13 +55,44 @@
volPath = os.path.join(imgPath, self.volUUID)
os.makedirs(imgPath)
- open(volPath, "w").truncate(self.VOLSIZE * self.SDBLKSZ)
+ open(volPath, "w").truncate(self.VOLSIZE * SDBLKSZ)
self.sdobj = FileDomainMockObject(self.mountpoint, self.sdUUID)
def tearDown(self):
shutil.rmtree(self.mountpoint)
def test(self):
- volSize = int(self.sdobj.getVSize(self.imgUUID, self.volUUID) /
- self.SDBLKSZ)
+ volSize = int(
+ self.sdobj.getVSize(self.imgUUID, self.volUUID) / SDBLKSZ)
assert volSize == self.VOLSIZE
+
+
+class BlockDomainMockObject(blockSD.BlockStorageDomain):
+ DOMAIN_VERSION = 3
+
+ def __init__(self, sdUUID, occupiedMetadataSlots=None):
+ self.sdUUID = sdUUID
+ self.stat = None
+ self.logBlkSize = SDBLKSZ
+ self.occupiedMetadataSlots = occupiedMetadataSlots
+
+ def getVersion(self):
+ return self.DOMAIN_VERSION
+
+ def _getOccupiedMetadataSlots(self):
+ return self.occupiedMetadataSlots
+
+
+class BlockDomainMetadataSlotTest(TestCaseBase):
+ OCCUPIED_METADATA_SLOTS = [(4, 1), (7, 1)]
+ EXPECTED_METADATA_SLOT = 5
+
+ def setUp(self):
+ self.sdUUID = str(uuid.uuid4())
+ self.blksd = BlockDomainMockObject(self.sdUUID,
+ self.OCCUPIED_METADATA_SLOTS)
+
+ def testMetaSlotSelection(self):
+ with blockVolume.BlockVolume._tagCreateLock:
+ mdSlot = self.blksd.getVolumeMetadataSlot(None, 1)
+ self.assertEqual(mdSlot, self.EXPECTED_METADATA_SLOT)
--
To view, visit http://gerrit.ovirt.org/26266
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I0ff018625443ce7cf75d3edf11644544e7f23dde
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Federico Simoncelli <fsimonce(a)redhat.com>
9 years, 9 months
Change in vdsm[master]: sysprep hook: unconfigure a vm clone
by Dan Kenigsberg
Dan Kenigsberg has uploaded a new change for review.
Change subject: sysprep hook: unconfigure a vm clone
......................................................................
sysprep hook: unconfigure a vm clone
This hook lets you run virt-sysprep on the disk images of a VM
before it is first run. It is useful to do that after a VM is cloned, in
order to remove outdated MAC addresses, ssh keys, or user accounts.
Change-Id: I447a4b01b86b17289030b71264d5d4218c2aa1e3
Signed-off-by: Dan Kenigsberg <danken(a)redhat.com>
---
M configure.ac
A debian/vdsm-hook-sysprep.docs
A debian/vdsm-hook-sysprep.install
M vdsm.spec.in
M vdsm_hooks/Makefile.am
A vdsm_hooks/sysprep/Makefile.am
A vdsm_hooks/sysprep/README
A vdsm_hooks/sysprep/before_vm_start.py
8 files changed, 178 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/86/26886/1
diff --git a/configure.ac b/configure.ac
index 12828be..ee1efb1 100644
--- a/configure.ac
+++ b/configure.ac
@@ -302,6 +302,7 @@
vdsm_hooks/smbios/Makefile
vdsm_hooks/spiceoptions/Makefile
vdsm_hooks/sriov/Makefile
+ vdsm_hooks/sysprep/Makefile
vdsm_hooks/vhostmd/Makefile
vdsm_hooks/vmdisk/Makefile
vdsm_hooks/vmfex/Makefile
diff --git a/debian/vdsm-hook-sysprep.docs b/debian/vdsm-hook-sysprep.docs
new file mode 100644
index 0000000..5ecd9c6
--- /dev/null
+++ b/debian/vdsm-hook-sysprep.docs
@@ -0,0 +1 @@
+COPYING
diff --git a/debian/vdsm-hook-sysprep.install b/debian/vdsm-hook-sysprep.install
new file mode 100644
index 0000000..bfa51cc
--- /dev/null
+++ b/debian/vdsm-hook-sysprep.install
@@ -0,0 +1 @@
+usr/libexec/vdsm/hooks/before_vm_start/60_sysprep
diff --git a/vdsm.spec.in b/vdsm.spec.in
index 08fc15e..e7e02ef 100644
--- a/vdsm.spec.in
+++ b/vdsm.spec.in
@@ -550,6 +550,14 @@
This vdsm hook can be used to configure some of
the spice optimization attributes and values.
+%package hook-sysprep
+Summary: Unconfigure guest OS using virt-sysprep
+BuildArch: noarch
+Requires: %{name} = %{version}-%{release}
+
+%description hook-sysprep
+VDSM hook which unconfigures the guest OS image.
+
%package hook-vmfex
Summary: vmfex support for VDSM
BuildArch: noarch
@@ -1323,6 +1331,10 @@
%defattr(-, root, root, -)
%{_libexecdir}/%{vdsm_name}/hooks/before_vm_start/50_spiceoptions
+%files hook-sysprep
+%defattr(-, root, root, -)
+%{_libexecdir}/%{vdsm_name}/hooks/before_vm_start/60_sysprep
+
%files hook-vmdisk
%defattr(-, root, root, -)
%{_libexecdir}/%{vdsm_name}/hooks/before_vm_start/50_vmdisk
diff --git a/vdsm_hooks/Makefile.am b/vdsm_hooks/Makefile.am
index b57181b..c919b23 100644
--- a/vdsm_hooks/Makefile.am
+++ b/vdsm_hooks/Makefile.am
@@ -43,6 +43,7 @@
smbios \
spiceoptions \
sriov \
+ sysprep \
vmdisk \
vmfex \
vmfex_dev \
diff --git a/vdsm_hooks/sysprep/Makefile.am b/vdsm_hooks/sysprep/Makefile.am
new file mode 100644
index 0000000..c36ee32
--- /dev/null
+++ b/vdsm_hooks/sysprep/Makefile.am
@@ -0,0 +1,30 @@
+#
+# Copyright 2014 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Refer to the README and COPYING files for full details of the license
+#
+
+EXTRA_DIST = \
+ before_vm_start.py
+
+install-data-local:
+ $(MKDIR_P) $(DESTDIR)$(vdsmhooksdir)/before_vm_start
+ $(INSTALL_SCRIPT) $(srcdir)/before_vm_start.py \
+ $(DESTDIR)$(vdsmhooksdir)/before_vm_start/60_sysprep
+
+uninstall-local:
+ $(RM) $(DESTDIR)$(vdsmhooksdir)/before_vm_start/60_sysprep
diff --git a/vdsm_hooks/sysprep/README b/vdsm_hooks/sysprep/README
new file mode 100644
index 0000000..d846b18
--- /dev/null
+++ b/vdsm_hooks/sysprep/README
@@ -0,0 +1,23 @@
+sysprep vdsm hook
+=================================
+This hook lets you run virt-sysprep on the disk images of a VM
+before it is first run.
+
+Installation:
+* Drop before_vm_start.py as /usr/libexec/vdsm/hooks/before_vm_start/60_sysprep
+ or (better) install vdsm-hook-sysprep.rpm on each of your hosts.
+
+* Use the engine-config to append the appropriate custom property:
+
+ sudo engine-config -s "UserDefinedVMProperties=sysprep=^(true|false)$"
+
+Usage:
+After cloning a VM, and before running it for the first time, set its "sysprep"
+custom property to "true" and fire it up. virt-sysprep would be called before
+the VM is started and would unconfigure the guest. See the virt-sysprep(1)
+manual page for more details on which information is being removed from the
+guest.
+
+It is highly important to remove the "sysprep" property after it is used.
+Otherwise, it would be re-applied when the VM is started again, removing
+valuable information.
diff --git a/vdsm_hooks/sysprep/before_vm_start.py b/vdsm_hooks/sysprep/before_vm_start.py
new file mode 100755
index 0000000..cea3a7e
--- /dev/null
+++ b/vdsm_hooks/sysprep/before_vm_start.py
@@ -0,0 +1,109 @@
+#!/usr/bin/python
+"""
+Run virt-sysprep on the VM images before starting it.
+
+This hook should be triggered only via the Run Once option
+"""
+
+
+import os
+import sys
+import traceback
+
+import hooking
+
+
+def iterate_sources(domxml):
+ for disk in domxml.getElementsByTagName('disk'):
+ if not disk.hasAttribute('device'):
+ continue
+ if disk.attributes['device'].value != 'disk':
+ continue
+ drivers = disk.getElementsByTagName('driver')
+ if not drivers:
+ continue
+ driver, = drivers
+
+ source, = disk.getElementsByTagName('source')
+ if source.hasAttribute('file'):
+ path = source.attributes['file'].value
+ elif source.hasAttribute('dev'):
+ path = source.attributes['dev'].value
+
+ yield driver.attributes['type'].value, path
+
+
+def build_cmd_line(domxml):
+ cmd = ['virt-sysprep']
+ for format, path in iterate_sources(domxml):
+ cmd.extend(['-a', path, '--format', format])
+ return cmd
+
+
+def main():
+ sysprep = os.environ.get('sysprep')
+ if sysprep is not None:
+ doc = hooking.read_domxml()
+ out, err, rc = hooking.execCmd(build_cmd_line(doc))
+ if rc:
+ raise RuntimeError(err, rc)
+
+
+def test():
+ from xml.dom.minidom import parseString
+
+ TEST1 = """
+ <domain type='qemu'>
+ <uuid>00000000-0000-0000-0000-000000000000</uuid>
+ <memory>219136</memory>
+ <currentMemory>219136</currentMemory>
+ <vcpu>1</vcpu>
+ <os>
+ <type arch='i686' machine='pc'>hvm</type>
+ <boot dev='hd'/>
+ </os>
+ <clock offset='utc'/>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>destroy</on_crash>
+ <devices>
+ <emulator>/usr/bin/qemu</emulator>
+ <disk type='block' device='disk'>
+ <source dev='/dev/HostVG/QEMUGuest1'/>
+ <target dev='hda' bus='ide'/>
+ </disk>
+ <disk type='block' device='cdrom'>
+ <driver name='qemu' type='raw'/>
+ <target dev='hdc' bus='ide' tray='open'/>
+ <readonly/>
+ </disk>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='raw'/>
+ <source file='/path/to/image'/>
+ <target dev='hdc' bus='ide'/>
+ <readonly/>
+ <alias name='ide0-1-0'/>
+ <address type='drive' controller='0' bus='1' target='0' unit='0'/>
+ </disk>
+ <disk type='block' device='disk'>
+ <driver name='qemu' type='qcow2'/>
+ <source dev='/dev/sdb'/>
+ <geometry cyls='16383' heads='16' secs='63' trans='lba'/>
+ <blockio logical_block_size='512' physical_block_size='4096'/>
+ <target dev='hda' bus='ide'/>
+ </disk>
+ </devices>
+ </domain>"""
+
+ print(build_cmd_line(parseString(TEST1)))
+
+
+if __name__ == '__main__':
+ try:
+ if '--test' in sys.argv:
+ test()
+ else:
+ main()
+ except:
+ hooking.exit_hook('[unexpected error]: %s\n' %
+ traceback.format_exc())
--
To view, visit http://gerrit.ovirt.org/26886
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I447a4b01b86b17289030b71264d5d4218c2aa1e3
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Dan Kenigsberg <danken(a)redhat.com>
9 years, 9 months