Change in vdsm[master]: Avoid redundant volume produces.
by ewarszaw@redhat.com
Eduardo has uploaded a new change for review.
Change subject: Avoid redundant volume produces.
......................................................................
Avoid redundant volume produces.
Add sd.getVolumePath(), which returns the volume path without producing it.
Deprecating hsm.getVolumePath() and hsm.prepareVolume().
When removed, remove API.prepare(), BindingXMLRPC.volumePrepare(),
API.getPath, BindingXMLRPC.volumeGetPath(), etc.
Change-Id: I3ad53a7e8a66d7f9bdd62048f2bf1f722a490c5c
Signed-off-by: Eduardo <ewarszaw(a)redhat.com>
---
M vdsm/storage/fileSD.py
M vdsm/storage/hsm.py
M vdsm/storage/sd.py
3 files changed, 11 insertions(+), 6 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/91/17991/1
diff --git a/vdsm/storage/fileSD.py b/vdsm/storage/fileSD.py
index 9d1493d..8cbea23 100644
--- a/vdsm/storage/fileSD.py
+++ b/vdsm/storage/fileSD.py
@@ -302,8 +302,7 @@
Return the volume lease (leasePath, leaseOffset)
"""
if self.hasVolumeLeases():
- vol = self.produceVolume(imgUUID, volUUID)
- volumePath = vol.getVolumePath()
+ volumePath = self.getVolumePath(imgUUID, volUUID)
leasePath = volumePath + fileVolume.LEASE_FILEEXT
return leasePath, fileVolume.LEASE_FILEOFFSET
return None, None
@@ -426,8 +425,9 @@
# NFS volumes. In theory it is necessary to fix the permission
# of the leaf only but to not introduce an additional requirement
# (ordered volUUIDs) we fix them all.
- for vol in [self.produceVolume(imgUUID, x) for x in volUUIDs]:
- self.oop.fileUtils.copyUserModeToGroup(vol.getVolumePath())
+ for volUUID in volUUIDs:
+ volPath = self.getVolumePath(imgUUID, volUUID)
+ self.oop.fileUtils.copyUserModeToGroup(volPath)
@classmethod
def format(cls, sdUUID):
diff --git a/vdsm/storage/hsm.py b/vdsm/storage/hsm.py
index c754ee8..3545677 100644
--- a/vdsm/storage/hsm.py
+++ b/vdsm/storage/hsm.py
@@ -3076,6 +3076,7 @@
volUUID=volUUID).getInfo()
return dict(info=info)
+ @deprecated
@public
def getVolumePath(self, sdUUID, spUUID, imgUUID, volUUID, options=None):
"""
@@ -3100,8 +3101,7 @@
"""
vars.task.getSharedLock(STORAGE, sdUUID)
path = sdCache.produce(
- sdUUID=sdUUID).produceVolume(imgUUID=imgUUID,
- volUUID=volUUID).getVolumePath()
+ sdUUID=sdUUID).getVolumePath(imgUUID, volUUID)
return dict(path=path)
@public
@@ -3127,6 +3127,7 @@
if fails:
self.log.error("Failed to remove the following rules: %s", fails)
+ @deprecated
@public
def prepareVolume(self, sdUUID, spUUID, imgUUID, volUUID, rw=True,
options=None):
diff --git a/vdsm/storage/sd.py b/vdsm/storage/sd.py
index 36c4877..dde7832 100644
--- a/vdsm/storage/sd.py
+++ b/vdsm/storage/sd.py
@@ -640,6 +640,10 @@
# If it has a repo we don't have multiple domains. Assume single pool
return os.path.join(self.storage_repository, self.getPools()[0])
+ def getVolumePath(self, imgUUID, volUUID):
+ return os.path.join(self.mountpoint, self.sdUUID, 'images', imgUUID,
+ volUUID)
+
def getIsoDomainImagesDir(self):
"""
Get 'images' directory from Iso domain
--
To view, visit http://gerrit.ovirt.org/17991
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I3ad53a7e8a66d7f9bdd62048f2bf1f722a490c5c
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Eduardo <ewarszaw(a)redhat.com>
8 years, 8 months
Change in vdsm[master]: [WIP] Create storage pool using command type 1
by ykaplan@redhat.com
Yeela Kaplan has uploaded a new change for review.
Change subject: [WIP] Create storage pool using command type 1
......................................................................
[WIP] Create storage pool using command type 1
Change-Id: Ia64f6dd2df38d2968f03ce66094f3ba7b4343503
Signed-off-by: Yeela Kaplan <ykaplan(a)redhat.com>
---
M vdsm/storage/blockSD.py
M vdsm/storage/hsm.py
M vdsm/storage/lvm.py
M vdsm/storage/sd.py
M vdsm/storage/sp.py
5 files changed, 71 insertions(+), 74 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/47/23647/1
diff --git a/vdsm/storage/blockSD.py b/vdsm/storage/blockSD.py
index 7980c80..bb7f365 100644
--- a/vdsm/storage/blockSD.py
+++ b/vdsm/storage/blockSD.py
@@ -92,6 +92,12 @@
VERS_METADATA_TAG = (2, 3)
+def encodeVgTags(tagsDict):
+ return [VGTagMetadataRW.METADATA_TAG_PREFIX +
+ lvmTagEncode("%s=%s" % (k, v))
+ for k, v in tagsDict.items()]
+
+
def encodePVInfo(pvInfo):
return (
"pv:%s," % pvInfo["guid"] +
@@ -130,6 +136,13 @@
def lvmTagDecode(s):
return LVM_ENC_ESCAPE.sub(lambda c: unichr(int(c.groups()[0])), s)
+
+
+def encodeVgTags(tagsDict):
+ tags = [VGTagMetadataRW.METADATA_TAG_PREFIX +
+ lvmTagEncode("%s=%s" % (k, v))
+ for k, v in tagsDict.items()]
+ return tuple(tags)
def _tellEnd(devPath):
@@ -523,7 +536,7 @@
# least SDMETADATA/METASIZE units, we know we can use the first
# SDMETADATA bytes of the metadata volume for the SD metadata.
# pass metadata's dev to ensure it is the first mapping
- mapping = cls.getMetaDataMapping(vgName)
+ #mapping = cls.getMetaDataMapping(vgName)
# Create the rest of the BlockSD internal volumes
lvm.createLV(vgName, sd.LEASES, sd.LEASES_SIZE, safe=False)
@@ -558,6 +571,7 @@
logBlkSize, phyBlkSize = lvm.getVGBlockSizes(vgName)
+ mapping = cls.getMetaDataMapping(vgName)
# create domain metadata
# FIXME : This is 99% like the metadata in file SD
# Do we really need to keep the VGUUID?
@@ -565,11 +579,11 @@
initialMetadata = {
sd.DMDK_VERSION: version,
sd.DMDK_SDUUID: sdUUID,
- sd.DMDK_TYPE: storageType,
- sd.DMDK_CLASS: domClass,
+ sd.DMDK_TYPE: sd.storageType(storageType),
+ sd.DMDK_CLASS: sd.class2name(domClass),
sd.DMDK_DESCRIPTION: domainName,
sd.DMDK_ROLE: sd.REGULAR_DOMAIN,
- sd.DMDK_POOLS: [],
+ sd.DMDK_POOLS: '',
sd.DMDK_LOCK_POLICY: '',
sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC: sd.DEFAULT_LEASE_PARAMS[
sd.DMDK_LOCK_RENEWAL_INTERVAL_SEC],
@@ -585,8 +599,8 @@
}
initialMetadata.update(mapping)
-
- md.update(initialMetadata)
+ toAdd = encodeVgTags(initialMetadata)
+ lvm.changeVGTags(vgName, delTags=(), addTags=toAdd, safe=False)
# Mark VG with Storage Domain Tag
try:
@@ -1302,6 +1316,22 @@
# It is time to deactivate the master LV now
lvm.deactivateLVs(self.sdUUID, MASTERLV)
+ def initMasterParams(self, poolMD, params):
+ vgUUID = self.getInfo()['vguuid']
+ vg = lvm.getVGbyUUID(vgUUID)
+ vgName = vg.name
+ toAdd = encodeVgTags(params)
+ lvm.changeVGTags(vgName, addTags=toAdd, safe=False)
+
+ def setMasterDomainParams(self, spUUID, leaseParams):
+ vgUUID = self.getInfo()['vguuid']
+ vg = lvm.getVGbyUUID(vgUUID)
+ vgName = vg.name
+ toAdd = encodeVgTags(leaseParams)
+ toAdd += encodeVgTags({sd.DMDK_POOLS: [spUUID],
+ sd.DMDK_ROLE: sd.MASTER_DOMAIN})
+ lvm.changeVGTags(vgName, delTags=(), addTags=toAdd, safe=False)
+
def refreshDirTree(self):
# create domain images folder
imagesPath = os.path.join(self.domaindir, sd.DOMAIN_IMAGES)
diff --git a/vdsm/storage/hsm.py b/vdsm/storage/hsm.py
index 5c73dd9..ff27d53 100644
--- a/vdsm/storage/hsm.py
+++ b/vdsm/storage/hsm.py
@@ -942,35 +942,15 @@
if masterDom not in domList:
raise se.InvalidParameterException("masterDom", str(masterDom))
+ if len(domList) > 1:
+ raise NotImplementedError("Create storage pool "
+ "only with master domain")
+
if len(poolName) > sp.MAX_POOL_DESCRIPTION_SIZE:
raise se.StoragePoolDescriptionTooLongError()
- msd = sdCache.produce(sdUUID=masterDom)
- msdType = msd.getStorageType()
- msdVersion = msd.getVersion()
- if (msdType in sd.BLOCK_DOMAIN_TYPES and
- msdVersion in blockSD.VERS_METADATA_LV and
- len(domList) > sp.MAX_DOMAINS):
- raise se.TooManyDomainsInStoragePoolError()
-
- for sdUUID in domList:
- try:
- dom = sdCache.produce(sdUUID=sdUUID)
- # TODO: consider removing validate() from here, as the domains
- # are going to be accessed much later, and may loose validity
- # until then.
- dom.validate()
- except:
- raise se.StorageDomainAccessError(sdUUID)
- # If you remove this condition, remove it from
- # StoragePool.attachSD() too.
- if dom.isData() and (dom.getVersion() > msdVersion):
- raise se.MixedSDVersionError(dom.sdUUID, dom.getVersion(),
- msd.sdUUID, msdVersion)
-
vars.task.getExclusiveLock(STORAGE, spUUID)
- for dom in sorted(domList):
- vars.task.getExclusiveLock(STORAGE, dom)
+ vars.task.getExclusiveLock(STORAGE, masterDom)
return sp.StoragePool(spUUID, self.domainMonitor, self.taskMng).create(
poolName, masterDom, domList, masterVersion, leaseParams)
diff --git a/vdsm/storage/lvm.py b/vdsm/storage/lvm.py
index 0f96df6..c1a0b92 100644
--- a/vdsm/storage/lvm.py
+++ b/vdsm/storage/lvm.py
@@ -302,7 +302,7 @@
if rc != 0:
# Filter might be stale
self.invalidateFilter()
- newCmd = self._addExtraCfg(cmd, safe)
+ newCmd = self._addExtraCfg(cmd, tuple(), safe)
# Before blindly trying again make sure
# that the commands are not identical, because
# the devlist is sorted there is no fear
diff --git a/vdsm/storage/sd.py b/vdsm/storage/sd.py
index 7f00533..c968d7b 100644
--- a/vdsm/storage/sd.py
+++ b/vdsm/storage/sd.py
@@ -766,6 +766,15 @@
def isMaster(self):
return self.getMetaParam(DMDK_ROLE).capitalize() == MASTER_DOMAIN
+ @classmethod
+ def initMasterParams(cls, poolMD, params):
+ poolMD.update(params)
+
+ def setMasterDomainParams(self, spUUID, leaseParams):
+ self.changeLeaseParams(leaseParams)
+ self.setMetaParam(DMDK_POOLS, [spUUID])
+ self.changeRole(MASTER_DOMAIN)
+
def initMaster(self, spUUID, leaseParams):
self.invalidateMetadata()
pools = self.getPools()
@@ -774,9 +783,7 @@
raise se.StorageDomainAlreadyAttached(pools[0], self.sdUUID)
with self._metadata.transaction():
- self.changeLeaseParams(leaseParams)
- self.setMetaParam(DMDK_POOLS, [spUUID])
- self.changeRole(MASTER_DOMAIN)
+ self.setMasterDomainParams(spUUID, leaseParams)
def isISO(self):
return self.getMetaParam(DMDK_CLASS) == ISO_DOMAIN
diff --git a/vdsm/storage/sp.py b/vdsm/storage/sp.py
index 50e29ef..0b00264 100644
--- a/vdsm/storage/sp.py
+++ b/vdsm/storage/sp.py
@@ -588,9 +588,8 @@
@unsecured
def create(self, poolName, msdUUID, domList, masterVersion, leaseParams):
"""
- Create new storage pool with single/multiple image data domain.
- The command will create new storage pool meta-data attach each
- storage domain to that storage pool.
+ Create new storage pool with single image data domain.
+ The command will create new storage pool meta-data
At least one data (images) domain must be provided
'poolName' - storage pool name
'msdUUID' - master domain of this pool (one of domList)
@@ -600,27 +599,20 @@
"masterVersion=%s %s", self.spUUID, poolName, msdUUID,
domList, masterVersion, leaseParams)
- if msdUUID not in domList:
- raise se.InvalidParameterException("masterDomain", msdUUID)
+ # Check the master domain before pool creation
+ try:
+ msd = sdCache.produce(msdUUID)
+ msd.validate()
+ except se.StorageException:
+ self.log.error("Unexpected error", exc_info=True)
+ raise se.StorageDomainAccessError(msdUUID)
- # Check the domains before pool creation
- for sdUUID in domList:
- try:
- domain = sdCache.produce(sdUUID)
- domain.validate()
- if sdUUID == msdUUID:
- msd = domain
- except se.StorageException:
- self.log.error("Unexpected error", exc_info=True)
- raise se.StorageDomainAccessError(sdUUID)
-
- # Validate unattached domains
- if not domain.isISO():
- domain.invalidateMetadata()
- spUUIDs = domain.getPools()
- # Non ISO domains have only 1 pool
- if len(spUUIDs) > 0:
- raise se.StorageDomainAlreadyAttached(spUUIDs[0], sdUUID)
+ # Validate unattached domains
+ msd.invalidateMetadata()
+ spUUIDs = msd.getPools()
+ # Non ISO domains have only 1 pool
+ if len(spUUIDs) > 0:
+ raise se.StorageDomainAlreadyAttached(spUUIDs[0], msdUUID)
fileUtils.createdir(self.poolPath)
self._acquireTemporaryClusterLock(msdUUID, leaseParams)
@@ -629,23 +621,10 @@
self._setSafe()
# Mark 'master' domain
# We should do it before actually attaching this domain to the pool
- # During 'master' marking we create pool metadata and each attached
- # domain should register there
+ # During 'master' marking we create pool metadata
self.createMaster(poolName, msd, masterVersion, leaseParams)
self.__rebuild(msdUUID=msdUUID, masterVersion=masterVersion)
- # Attach storage domains to the storage pool
- # Since we are creating the pool then attach is done from the hsm
- # and not the spm therefore we must manually take the master domain
- # lock
- # TBD: create will receive only master domain and further attaches
- # should be done under SPM
- # Master domain was already attached (in createMaster),
- # no need to reattach
- for sdUUID in domList:
- # No need to attach the master
- if sdUUID != msdUUID:
- self.attachSD(sdUUID)
except Exception:
self.log.error("Create pool %s canceled ", poolName, exc_info=True)
try:
@@ -716,13 +695,14 @@
@unsecured
def initParameters(self, poolName, domain, masterVersion):
- self._getPoolMD(domain).update({
+ params = {
PMDK_SPM_ID: SPM_ID_FREE,
PMDK_LVER: LVER_INVALID,
PMDK_MASTER_VER: masterVersion,
PMDK_POOL_DESCRIPTION: poolName,
PMDK_DOMAINS: {domain.sdUUID: sd.DOM_ACTIVE_STATUS},
- })
+ }
+ domain.initMasterParams(self._getPoolMD(domain), params)
@unsecured
def createMaster(self, poolName, domain, masterVersion, leaseParams):
--
To view, visit http://gerrit.ovirt.org/23647
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ia64f6dd2df38d2968f03ce66094f3ba7b4343503
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Yeela Kaplan <ykaplan(a)redhat.com>
8 years, 8 months
Change in vdsm[master]: [WIP] Create storage domain using command type 1
by ykaplan@redhat.com
Yeela Kaplan has uploaded a new change for review.
Change subject: [WIP] Create storage domain using command type 1
......................................................................
[WIP] Create storage domain using command type 1
All bootstrap operations are executed using command type 1.
Change-Id: I127af299086ec5572d29686451d4892c9ff0330d
Signed-off-by: Yeela Kaplan <ykaplan(a)redhat.com>
---
M vdsm/storage/blockSD.py
M vdsm/storage/lvm.py
2 files changed, 15 insertions(+), 14 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/46/23646/1
diff --git a/vdsm/storage/blockSD.py b/vdsm/storage/blockSD.py
index 55bd796..7980c80 100644
--- a/vdsm/storage/blockSD.py
+++ b/vdsm/storage/blockSD.py
@@ -517,7 +517,7 @@
# Create metadata service volume
metasize = cls.metaSize(vgName)
- lvm.createLV(vgName, sd.METADATA, "%s" % (metasize))
+ lvm.createLV(vgName, sd.METADATA, "%s" % (metasize), safe=False)
# Create the mapping right now so the index 0 is guaranteed
# to belong to the metadata volume. Since the metadata is at
# least SDMETADATA/METASIZE units, we know we can use the first
@@ -526,11 +526,11 @@
mapping = cls.getMetaDataMapping(vgName)
# Create the rest of the BlockSD internal volumes
- lvm.createLV(vgName, sd.LEASES, sd.LEASES_SIZE)
- lvm.createLV(vgName, sd.IDS, sd.IDS_SIZE)
- lvm.createLV(vgName, sd.INBOX, sd.INBOX_SIZE)
- lvm.createLV(vgName, sd.OUTBOX, sd.OUTBOX_SIZE)
- lvm.createLV(vgName, MASTERLV, MASTERLV_SIZE)
+ lvm.createLV(vgName, sd.LEASES, sd.LEASES_SIZE, safe=False)
+ lvm.createLV(vgName, sd.IDS, sd.IDS_SIZE, safe=False)
+ lvm.createLV(vgName, sd.INBOX, sd.INBOX_SIZE, safe=False)
+ lvm.createLV(vgName, sd.OUTBOX, sd.OUTBOX_SIZE, safe=False)
+ lvm.createLV(vgName, MASTERLV, MASTERLV_SIZE, safe=False)
# Create VMS file system
_createVMSfs(os.path.join("/dev", vgName, MASTERLV))
@@ -591,7 +591,7 @@
# Mark VG with Storage Domain Tag
try:
lvm.replaceVGTag(vgName, STORAGE_UNREADY_DOMAIN_TAG,
- STORAGE_DOMAIN_TAG)
+ STORAGE_DOMAIN_TAG, safe=False)
except se.StorageException:
raise se.VolumeGroupUninitialized(vgName)
diff --git a/vdsm/storage/lvm.py b/vdsm/storage/lvm.py
index 932d69e..0f96df6 100644
--- a/vdsm/storage/lvm.py
+++ b/vdsm/storage/lvm.py
@@ -257,7 +257,7 @@
return self._extraCfg
- def _addExtraCfg(self, cmd, devices=tuple(), safe):
+ def _addExtraCfg(self, cmd, devices=tuple(), safe=True):
newcmd = [constants.EXT_LVM, cmd[0]]
if devices:
conf = _buildConfig(devices)
@@ -656,6 +656,7 @@
globals()["_current_lvmconf"] = _current_lvmconf.replace("locking_type=4",
"locking_type=1")
log.debug("### _current_lvmconf %s", globals()["_current_lvmconf"])
+
def bootstrap(refreshlvs=()):
"""
@@ -1061,7 +1062,7 @@
def createLV(vgName, lvName, size, activate=True, contiguous=False,
- initialTag=None):
+ initialTag=None, safe=True):
"""
Size units: MB (1024 ** 2 = 2 ** 20)B.
"""
@@ -1078,7 +1079,7 @@
if initialTag is not None:
cmd.extend(("--addtag", initialTag))
cmd.extend(("--name", lvName, vgName))
- rc, out, err = _lvminfo.cmd(cmd, _lvminfo._getVGDevs((vgName, )))
+ rc, out, err = _lvminfo.cmd(cmd, _lvminfo._getVGDevs((vgName, )), safe)
if rc == 0:
_lvminfo._invalidatevgs(vgName)
@@ -1280,7 +1281,7 @@
return os.path.exists(lvPath(vgName, lvName))
-def changeVGTags(vgName, delTags=(), addTags=()):
+def changeVGTags(vgName, delTags=(), addTags=(), safe=True):
delTags = set(delTags)
addTags = set(addTags)
if delTags.intersection(addTags):
@@ -1296,7 +1297,7 @@
cmd.extend(("--addtag", tag))
cmd.append(vgName)
- rc, out, err = _lvminfo.cmd(cmd, _lvminfo._getVGDevs((vgName, )))
+ rc, out, err = _lvminfo.cmd(cmd, _lvminfo._getVGDevs((vgName, )), safe)
_lvminfo._invalidatevgs(vgName)
if rc != 0:
raise se.VolumeGroupReplaceTagError(
@@ -1321,8 +1322,8 @@
raise se.VolumeGroupRemoveTagError(vgName)
-def replaceVGTag(vg, oldTag, newTag):
- changeVGTags(vg, [oldTag], [newTag])
+def replaceVGTag(vg, oldTag, newTag, safe=True):
+ changeVGTags(vg, [oldTag], [newTag], safe)
def addVGTags(vgName, tags):
--
To view, visit http://gerrit.ovirt.org/23646
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I127af299086ec5572d29686451d4892c9ff0330d
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Yeela Kaplan <ykaplan(a)redhat.com>
8 years, 8 months
Change in vdsm[master]: [WIP] Towards a more (block) secure HSM.
by ewarszaw@redhat.com
Eduardo has uploaded a new change for review.
Change subject: [WIP] Towards a more (block) secure HSM.
......................................................................
[WIP] Towards a more (block) secure HSM.
Change-Id: I30df4ee5cdb6b44cf14d8cb155436aac7442a07d
---
M vdsm/storage/hsm.py
M vdsm/storage/lvm.py
M vdsm/storage/sp.py
3 files changed, 25 insertions(+), 5 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/18/2218/1
--
To view, visit http://gerrit.ovirt.org/2218
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I30df4ee5cdb6b44cf14d8cb155436aac7442a07d
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Eduardo <ewarszaw(a)redhat.com>
8 years, 8 months
Change in vdsm[master]: Makefile.am: create vdsm logs on make install
by Douglas Schilling Landgraf
Douglas Schilling Landgraf has uploaded a new change for review.
Change subject: Makefile.am: create vdsm logs on make install
......................................................................
Makefile.am: create vdsm logs on make install
Currently during the RPM install vdsm creates the below logs files:
/var/log/vdsm/{metadata.log,mom.log,supervdsm.log,vdsm.log}
and we should do the same for non-RPM distros.
Change-Id: I7f5dc4ca01fecddc5226255e37b0bab68b8c479f
Signed-off-by: Douglas Schilling Landgraf <dougsland(a)redhat.com>
---
M vdsm/Makefile.am
1 file changed, 4 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/82/23882/1
diff --git a/vdsm/Makefile.am b/vdsm/Makefile.am
index 66b9af5..5e7f2cf 100644
--- a/vdsm/Makefile.am
+++ b/vdsm/Makefile.am
@@ -167,6 +167,10 @@
$(MKDIR_P) $(DESTDIR)$(vdsmlibdir)/upgrade
$(MKDIR_P) $(DESTDIR)$(vdsmbackupdir)
$(MKDIR_P) $(DESTDIR)$(localstatedir)/lib/libvirt/qemu/channels
+ touch $(DESTDIR)$(vdsmlogdir)/{metadata.log,mom.log,supervdsm.log,vdsm.log}
+ chmod 0644 $(DESTDIR)$(vdsmlogdir)/{metadata.log,mom.log,supervdsm.log,vdsm.log}
+ chown $(VDSMUSER):$(VDSMGROUP) $(DESTDIR)$(vdsmlogdir)/{metadata.log,mom.log,vdsm.log}
+ chown root:root $(DESTDIR)$(vdsmlogdir)/supervdsm.log
uninstall-local: \
uninstall-data-dhclient-hooks \
--
To view, visit http://gerrit.ovirt.org/23882
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I7f5dc4ca01fecddc5226255e37b0bab68b8c479f
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Douglas Schilling Landgraf <dougsland(a)redhat.com>
8 years, 8 months
Change in vdsm[master]: sp: improve pool creation error handling
by Federico Simoncelli
Federico Simoncelli has uploaded a new change for review.
Change subject: sp: improve pool creation error handling
......................................................................
sp: improve pool creation error handling
Change-Id: I0cce08e368dec092222c081609d0663d7990ab10
Signed-off-by: Federico Simoncelli <fsimonce(a)redhat.com>
---
M vdsm/storage/sp.py
1 file changed, 17 insertions(+), 14 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/18/22818/1
diff --git a/vdsm/storage/sp.py b/vdsm/storage/sp.py
index 4f4765e..247973c 100644
--- a/vdsm/storage/sp.py
+++ b/vdsm/storage/sp.py
@@ -620,27 +620,30 @@
# lock
# TBD: create will receive only master domain and further attaches
# should be done under SPM
-
- # Master domain was already attached (in createMaster),
- # no need to reattach
- for sdUUID in domList:
- # No need to attach the master
- if sdUUID != msdUUID:
- self.attachSD(sdUUID)
+ try:
+ for sdUUID in domList:
+ # Master domain was already attached (in createMaster)
+ if sdUUID != msdUUID:
+ self.attachSD(sdUUID)
+ except Exception:
+ # FIXME: detachSD will fail for the master domain, we need a
+ # special handling (master must be detached from the pool).
+ self.__cleanupDomains(domList, msdUUID, masterVersion)
except Exception:
- self.log.error("Create pool %s canceled ", poolName, exc_info=True)
+ self.log.exception('create pool %s canceled', self.spUUID)
try:
fileUtils.cleanupdir(self.poolPath)
- self.__cleanupDomains(domList, msdUUID, masterVersion)
- except:
- self.log.error("Cleanup failed due to an unexpected error",
- exc_info=True)
+ except Exception:
+ self.log.exception('pool %s cleanup failed', self.spUUID)
raise
finally:
self._setUnsafe()
-
self._releaseTemporaryClusterLock(msdUUID)
- self.stopMonitoringDomains()
+ # stopMonitoringDomains needs masterDomain and the monitoring
+ # domains threads are started only if the master was properly
+ # initialized and set
+ if self.masterDomain:
+ self.stopMonitoringDomains()
return True
--
To view, visit http://gerrit.ovirt.org/22818
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I0cce08e368dec092222c081609d0663d7990ab10
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Federico Simoncelli <fsimonce(a)redhat.com>
8 years, 8 months
Change in vdsm[master]: vm: small refactor for _normalizeVdsmImg
by Federico Simoncelli
Federico Simoncelli has uploaded a new change for review.
Change subject: vm: small refactor for _normalizeVdsmImg
......................................................................
vm: small refactor for _normalizeVdsmImg
Change-Id: Ie68292eee4b82fbe8527e3960739979cfe117dfa
Signed-off-by: Federico Simoncelli <fsimonce(a)redhat.com>
---
M vdsm/vm.py
1 file changed, 18 insertions(+), 17 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/57/19157/1
diff --git a/vdsm/vm.py b/vdsm/vm.py
index 92d274e..2605f24 100644
--- a/vdsm/vm.py
+++ b/vdsm/vm.py
@@ -1779,25 +1779,26 @@
break
return str(idx)
- def _normalizeVdsmImg(self, drv):
- drv['reqsize'] = drv.get('reqsize', '0') # Backward compatible
- if 'device' not in drv:
- drv['device'] = 'disk'
+ def _normalizeVdsmImg(self, drive):
+ drive['device'] = drive.get('device', 'disk') # Disk by default
+ drive['reqsize'] = drive.get('reqsize', '0') # Backward compatible
- if drv['device'] == 'disk':
- res = self.cif.irs.getVolumeSize(drv['domainID'], drv['poolID'],
- drv['imageID'], drv['volumeID'])
- try:
- drv['truesize'] = res['truesize']
- drv['apparentsize'] = res['apparentsize']
- except KeyError:
- self.log.error("Unable to get volume size for %s",
- drv['volumeID'], exc_info=True)
- raise RuntimeError("Volume %s is corrupted or missing" %
- drv['volumeID'])
+ if drive['device'] == 'disk':
+ volInfo = self.cif.irs.getVolumeInfo(
+ drive['domainID'], drive['poolID'], drive['imageID'],
+ drive['volumeID'])
+
+ if volInfo.get('status', {}).get('code', -1):
+ self.log.error(
+ "Unable to get volume info for %s", drive['volumeID'])
+ raise RuntimeError(
+ "Volume %s is corrupted or missing" % drive['volumeID'])
+
+ drive['truesize'] = volInfo['info']['truesize']
+ drive['apparentsize'] = volInfo['info']['apparentsize']
else:
- drv['truesize'] = 0
- drv['apparentsize'] = 0
+ drive['truesize'] = 0
+ drive['apparentsize'] = 0
@classmethod
def _normalizeDriveSharedAttribute(self, drive):
--
To view, visit http://gerrit.ovirt.org/19157
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ie68292eee4b82fbe8527e3960739979cfe117dfa
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Federico Simoncelli <fsimonce(a)redhat.com>
8 years, 8 months
Change in vdsm[master]: vmDevices: introduce VmDeviceContainer
by mpoledni@redhat.com
Martin Polednik has uploaded a new change for review.
Change subject: vmDevices: introduce VmDeviceContainer
......................................................................
vmDevices: introduce VmDeviceContainer
EARLY WORK IN PROGRESS: VmDeviceContainer is structure that will allow
us to store devices as a class instances while keeping backwards
compatibility with old self.conf['devices']
Change-Id: I65debd35115da078df0c0cb6f50c57feb984c5a3
Signed-off-by: Martin Polednik <mpoledni(a)redhat.com>
---
M vdsm/vm.py
1 file changed, 33 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/38/21138/1
diff --git a/vdsm/vm.py b/vdsm/vm.py
index 5ae54d7..36cdbb1 100644
--- a/vdsm/vm.py
+++ b/vdsm/vm.py
@@ -1727,6 +1727,39 @@
return m
+class VmDeviceContainer(dict):
+ @property
+ def legacy(self):
+ """
+ Return list of device dicts that represents backwards-compatible
+ self.conf['devices']
+
+ [..., {..., 'type': 'disk', ...}, ...]
+ """
+ deviceList = []
+ for key in self.keys():
+ for device in self[key]:
+ # loop through devices __slots__ and return all set attributes
+ deviceList.append(dict((attr, getattr(device, attr))
+ for attr in device.__slots__
+ if hasattr(device, attr)))
+
+ return deviceList
+
+ def restoreLegacy(self, state):
+ """
+ Reconstruct container using old self.conf['devices'] structure of
+
+ [..., {..., 'type': 'disk', ...}, ...]
+ to
+
+ VmDeviceContainer[DISK_DEVICES] =
+ [..., {..., 'type': 'disk', ...}, ...]
+ """
+ for device in state:
+ self[device['type']] = device
+
+
class Vm(object):
"""
Used for abstracting communication between various parts of the
--
To view, visit http://gerrit.ovirt.org/21138
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I65debd35115da078df0c0cb6f50c57feb984c5a3
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Martin Polednik <mpoledni(a)redhat.com>
8 years, 8 months
Change in vdsm[master]: vmDevices: add mechanism to persist vmDevice defaults
by mpoledni@redhat.com
Martin Polednik has uploaded a new change for review.
Change subject: vmDevices: add mechanism to persist vmDevice defaults
......................................................................
vmDevices: add mechanism to persist vmDevice defaults
Multiple vmDevices such as BalloonDevice or watchdogDevice currently
do not persist their defaults in class attributes but rather use them in
XML directly, hiding their existence. This patch aims to change this
behavior by implementing vmDevice._defaults(), which adds the default
attributes to instance directly to enable future persistence of these
classes.
Change-Id: Idc8383cbce78490c8dfab1c253883a06459f1547
Signed-off-by: Martin Polednik <mpoledni(a)redhat.com>
---
M tests/vmTests.py
M vdsm/vm.py
2 files changed, 45 insertions(+), 29 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/66/21066/1
diff --git a/tests/vmTests.py b/tests/vmTests.py
index 1f69f0a..5c813fc 100644
--- a/tests/vmTests.py
+++ b/tests/vmTests.py
@@ -261,7 +261,7 @@
def testWatchdogXML(self):
watchdogXML = '<watchdog action="none" model="i6300esb"/>'
- dev = {'device': 'watchdog', 'type': 'watchdog',
+ dev = {'device': 'watchdog',
'specParams': {'model': 'i6300esb', 'action': 'none'}}
watchdog = vm.WatchdogDevice(self.conf, self.log, **dev)
self.assertXML(watchdog.getXML(), watchdogXML)
diff --git a/vdsm/vm.py b/vdsm/vm.py
index 796735f..36b9598 100644
--- a/vdsm/vm.py
+++ b/vdsm/vm.py
@@ -1167,6 +1167,7 @@
class VmDevice(object):
def __init__(self, conf, log, **kwargs):
+ self.specParams = {}
for attr, value in kwargs.iteritems():
try:
setattr(self, attr, value)
@@ -1175,6 +1176,10 @@
self.conf = conf
self.log = log
self._deviceXML = None
+ self._defaults()
+
+ def _defaults(self):
+ pass
def __str__(self):
attrs = [':'.join((a, str(getattr(self, a)))) for a in dir(self)
@@ -1192,10 +1197,13 @@
elemAttrs['type'] = deviceType
for attrName in attributes:
- if not hasattr(self, attrName):
+ if attrName in self.specParams:
+ attr = self.specParams[attrName]
+ elif hasattr(self, attrName):
+ attr = getattr(self, attrName)
+ else:
continue
- attr = getattr(self, attrName)
if isinstance(attr, dict):
element.appendChildWithArgs(attrName, **attr)
else:
@@ -1215,37 +1223,44 @@
class ControllerDevice(VmDevice):
+ def _defaults(self):
+ if self.device == 'virtio-serial':
+ if 'index' not in self.specParams:
+ self.index = '0'
+ if 'ports' not in self.specParams:
+ self.ports = '16'
def getXML(self):
"""
Create domxml for controller device
"""
ctrl = self.createXmlElem('controller', self.device,
- ['index', 'model', 'master', 'address'])
- if self.device == 'virtio-serial':
- ctrl.setAttrs(index='0', ports='16')
+ ['index', 'model', 'master', 'address',
+ 'ports'])
return ctrl
class VideoDevice(VmDevice):
+ def _defaults(self):
+ if 'vram' not in self.specParams:
+ self.specParams['vram'] = '32768'
+ if 'heads' not in self.specParams:
+ self.specParams['heads'] = '1'
def getXML(self):
"""
Create domxml for video device
"""
video = self.createXmlElem('video', None, ['address'])
- sourceAttrs = {'vram': self.specParams.get('vram', '32768'),
- 'heads': self.specParams.get('heads', '1')}
- if 'ram' in self.specParams:
- sourceAttrs['ram'] = self.specParams['ram']
+ model = self.createXmlElem('model', self.device,
+ ['vram', 'heads', 'ram'])
- video.appendChildWithArgs('model', type=self.device, **sourceAttrs)
+ video.appendChild(model)
return video
class SoundDevice(VmDevice):
-
def getXML(self):
"""
Create domxml for sound device
@@ -1256,7 +1271,6 @@
class NetworkInterfaceDevice(VmDevice):
-
def __init__(self, conf, log, **kwargs):
# pyLint can't tell that the Device.__init__() will
# set a nicModel attribute, so modify the kwarg list
@@ -1646,7 +1660,6 @@
class BalloonDevice(VmDevice):
-
def getXML(self):
"""
Create domxml for a memory balloon device.
@@ -1662,11 +1675,11 @@
class WatchdogDevice(VmDevice):
- def __init__(self, *args, **kwargs):
- super(WatchdogDevice, self).__init__(*args, **kwargs)
-
- if not hasattr(self, 'specParams'):
- self.specParams = {}
+ def _defaults(self):
+ if 'model' not in self.specParams:
+ self.specParams['model'] = 'i6300esb'
+ if 'action' not in self.specParams:
+ self.specParams['action'] = 'none'
def getXML(self):
"""
@@ -1677,9 +1690,8 @@
function='0x0'/>
</watchdog>
"""
- m = self.createXmlElem(self.type, None, ['address'])
- m.setAttrs(model=self.specParams.get('model', 'i6300esb'),
- action=self.specParams.get('action', 'none'))
+ m = self.createXmlElem(self.device, None, ['address', 'model',
+ 'action'])
return m
@@ -1692,11 +1704,8 @@
<address ... />
</smartcard>
"""
- card = self.createXmlElem(self.device, None, ['address'])
- sourceAttrs = {'mode': self.specParams['mode']}
- if sourceAttrs['mode'] != 'host':
- sourceAttrs['type'] = self.specParams['type']
- card.setAttrs(**sourceAttrs)
+ card = self.createXmlElem(self.device, None, ['address', 'mode',
+ 'type'])
return card
@@ -1713,6 +1722,12 @@
class ConsoleDevice(VmDevice):
+ def _defaults(self):
+ self.type = 'pty'
+ self.port = '0'
+ self.specParams['type'] = 'virtio'
+ self.specParams['port'] = '0'
+
def getXML(self):
"""
Create domxml for a console device.
@@ -1721,8 +1736,9 @@
<target type='virtio' port='0'/>
</console>
"""
- m = self.createXmlElem('console', 'pty')
- m.appendChildWithArgs('target', type='virtio', port='0')
+ m = self.createXmlElem(self.device, self.type)
+ m.appendChildWithArgs('target', None, type=self.specParams['type'],
+ port=self.specParams['port'])
return m
--
To view, visit http://gerrit.ovirt.org/21066
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Idc8383cbce78490c8dfab1c253883a06459f1547
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Martin Polednik <mpoledni(a)redhat.com>
8 years, 8 months
Change in vdsm[master]: gluster: [WIP]Get size information of gluster volume
by avishwan@redhat.com
Aravinda VK has uploaded a new change for review.
Change subject: gluster: [WIP]Get size information of gluster volume
......................................................................
gluster: [WIP]Get size information of gluster volume
Change-Id: I358d4f3bf793ecc1a01e0592d68919d1405f6e19
Signed-off-by: Aravinda VK <avishwan(a)redhat.com>
---
M client/vdsClientGluster.py
M vdsm.spec.in
M vdsm/gluster/Makefile.am
M vdsm/gluster/__init__.py
M vdsm/gluster/api.py
A vdsm/gluster/gfapi.py
6 files changed, 100 insertions(+), 1 deletion(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/22/17822/1
diff --git a/client/vdsClientGluster.py b/client/vdsClientGluster.py
index 90af83e..644bfe3 100644
--- a/client/vdsClientGluster.py
+++ b/client/vdsClientGluster.py
@@ -424,6 +424,17 @@
pp.pprint(status)
return status['status']['code'], status['status']['message']
+ def do_glusterVolumeSize(self, args):
+ params = self._eqSplit(args)
+ try:
+ volumeName = params.get('volumeName', '')
+ except:
+ raise ValueError
+
+ status = self.s.glusterVolumeSize(volumeName)
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
def getGlusterCmdDict(serv):
return \
@@ -705,4 +716,9 @@
'not set'
'(swift, glusterd, smb, memcached)'
)),
+ 'glusterVolumeSize': (
+ serv.do_glusterVolumeSize,
+ ('volumeName=<volume name>',
+ 'Returns total, available and used space status of gluster volume'
+ )),
}
diff --git a/vdsm.spec.in b/vdsm.spec.in
index 21b3565..e49e102 100644
--- a/vdsm.spec.in
+++ b/vdsm.spec.in
@@ -1271,6 +1271,7 @@
%doc COPYING
%{_datadir}/%{vdsm_name}/gluster/api.py*
%{_datadir}/%{vdsm_name}/gluster/vdsmapi-gluster-schema.json
+%{_datadir}/%{vdsm_name}/gluster/gfapi.py*
%{_datadir}/%{vdsm_name}/gluster/hooks.py*
%{_datadir}/%{vdsm_name}/gluster/services.py*
%endif
diff --git a/vdsm/gluster/Makefile.am b/vdsm/gluster/Makefile.am
index dd5434d..9c2989f 100644
--- a/vdsm/gluster/Makefile.am
+++ b/vdsm/gluster/Makefile.am
@@ -26,6 +26,7 @@
api.py \
cli.py \
exception.py \
+ gfapi.py \
hooks.py \
hostname.py \
services.py \
diff --git a/vdsm/gluster/__init__.py b/vdsm/gluster/__init__.py
index bec70ea..e5a2fd6 100644
--- a/vdsm/gluster/__init__.py
+++ b/vdsm/gluster/__init__.py
@@ -22,7 +22,7 @@
import tempfile
from functools import wraps
-MODULE_LIST = ('cli', 'hooks', 'services')
+MODULE_LIST = ('cli', 'hooks', 'services', 'gfapi')
def makePublic(func):
diff --git a/vdsm/gluster/api.py b/vdsm/gluster/api.py
index 4bd8308..89546c1 100644
--- a/vdsm/gluster/api.py
+++ b/vdsm/gluster/api.py
@@ -287,6 +287,22 @@
status = self.svdsmProxy.glusterServicesGet(serviceNames)
return {'services': status}
+ @exportAsVerb
+ def volumeSize(self, volumeName, options=None):
+ data = self.svdsmProxy.glusterVolumeStatvfs(volumeName)
+ # f_blocks = Total number of blocks
+ # f_bfree = Total number of blocks free
+ # f_bavail = Total number of blocks available for non root user
+ # total blocks available = f_blocks - (f_bfree - f_bavail)
+ total_blocks_available = data['f_blocks'] - \
+ (data['f_bfree'] - data['f_bavail'])
+ return {
+ 'total': total_blocks_available * data['f_bsize'] / 1024,
+ 'free': data['f_bavail'] * data['f_bsize'] / 1024,
+ 'used': (total_blocks_available - data['f_bavail']) * \
+ data['f_bsize'] / 1024
+ }
+
def getGlusterMethods(gluster):
l = []
diff --git a/vdsm/gluster/gfapi.py b/vdsm/gluster/gfapi.py
new file mode 100644
index 0000000..abfdabd
--- /dev/null
+++ b/vdsm/gluster/gfapi.py
@@ -0,0 +1,65 @@
+#
+# Copyright 2013 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Refer to the README and COPYING files for full details of the license
+#
+from ctypes import *
+
+# import exception as ge
+from . import makePublic
+
+GLUSTER_VOL_PROTOCAL='tcp'
+GLUSTER_VOL_HOST='localhost'
+GLUSTER_VOL_PORT=24007
+
+class Stat (Structure):
+ _fields_ = [
+ ('f_bsize', c_ulong),
+ ('f_frsize', c_ulong),
+ ('f_blocks', c_ulong),
+ ('f_bfree', c_ulong),
+ ('f_bavail', c_ulong),
+ ('f_files', c_ulong),
+ ('f_ffree', c_ulong),
+ ('f_favail', c_ulong),
+ ('f_fsid', c_ulong),
+ ('f_flag', c_ulong),
+ ('f_namemax', c_ulong),
+ ('__f_spare', c_int * 6),
+ ]
+
+
+api = CDLL("libgfapi.so",RTLD_GLOBAL)
+api.glfs_statvfs.restype = c_int
+api.glfs_statvfs.argtypes = [c_void_p, c_char_p, POINTER(Stat)]
+
+@makePublic
+def volumeStatvfs(volumeId):
+ path = "/"
+ fs = api.glfs_new(volumeId)
+ api.glfs_set_volfile_server(fs,
+ GLUSTER_VOL_PROTOCAL,
+ GLUSTER_VOL_HOST,
+ GLUSTER_VOL_PORT)
+ api.glfs_init(fs)
+
+ x = Stat()
+ rc = api.glfs_statvfs(fs, path, byref(x))
+ statvfsData = {}
+ for k in x._fields_:
+ statvfsData[k[0]] = getattr(x, k[0])
+ return statvfsData
--
To view, visit http://gerrit.ovirt.org/17822
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I358d4f3bf793ecc1a01e0592d68919d1405f6e19
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Aravinda VK <avishwan(a)redhat.com>
8 years, 8 months