Change in vdsm[master]: image: unify the prezeroing optimizations
by Federico Simoncelli
Federico Simoncelli has uploaded a new change for review.
Change subject: image: unify the prezeroing optimizations
......................................................................
image: unify the prezeroing optimizations
The same prezeroing optimization logic was used in multiple places; this
patch unifies it in __optimizedCreateVolume.
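As a rough sketch of the logic being consolidated (simplified, not the vdsm
implementation; volume.COW_FORMAT, volume.SPARSE_VOL and TEMPORARY_VOLUME_SIZE
are the names used in image.py below, sizes are in bytes and extend() takes
512-byte sectors):

def pick_creation_size(size, volFormat, preallocate):
    # COW and sparse volumes are cheap to create at full size; only
    # preallocated volumes pay the prezeroing cost on NFS, so those are
    # created tiny and resized afterwards via setSize().
    if volFormat == volume.COW_FORMAT or preallocate == volume.SPARSE_VOL:
        return size
    return TEMPORARY_VOLUME_SIZE

def bytes_to_sectors(nbytes):
    # Round up to whole 512-byte sectors (Python 2 integer division),
    # matching the (extendSize + 511) / 512 expression in the patch.
    return (nbytes + 511) / 512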
Change-Id: I0fd90f85e9debf98bcac07d1b8d4b38c319c33f2
Signed-off-by: Federico Simoncelli <fsimonce(a)redhat.com>
---
M vdsm/storage/image.py
1 file changed, 43 insertions(+), 45 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/04/8504/1
diff --git a/vdsm/storage/image.py b/vdsm/storage/image.py
index e86d94c..19ab078 100644
--- a/vdsm/storage/image.py
+++ b/vdsm/storage/image.py
@@ -454,6 +454,37 @@
except Exception:
self.log.error("Unexpected error", exc_info=True)
+ def __optimizedCreateVolume(self, domain, imgUUID, size, apparentSize,
+ volFormat, preallocate, diskType, volUUID, desc, srcImgUUID,
+ srcVolUUID):
+ # To avoid 'prezeroing' preallocated volume on NFS domain,
+ # we create the target volume with minimal size and after
+ # that we'll change its metadata back to the original size.
+ if (volFormat == volume.COW_FORMAT
+ or preallocate == volume.SPARSE_VOL):
+ volTmpSize = size
+ else:
+ volTmpSize = TEMPORARY_VOLUME_SIZE
+
+ domain.createVolume(imgUUID, volTmpSize, volFormat, preallocate,
+ diskType, volUUID, desc, srcImgUUID, srcVolUUID)
+ newVolume = domain.produceVolume(imgUUID, volUUID)
+
+ if volFormat == volume.RAW_FORMAT:
+ extendSize = size
+ else:
+ extendSize = apparentSize
+
+ # Extend volume (for LV only) size to the actual size
+ newVolume.extend((extendSize + 511) / 512)
+
+ # Change destination volume metadata back to the original
+ # size. Heavy operation, do it only if necessary.
+ if volTmpSize != size:
+ newVolume.setSize(size)
+
+ return newVolume
+
def _createTargetImage(self, destDom, srcSdUUID, imgUUID):
# Before actual data copying we need perform several operation
# such as: create all volumes, create fake template if needed, ...
@@ -500,34 +531,12 @@
# find out src volume parameters
volParams = srcVol.getVolumeParams(bs=1)
- # To avoid 'prezeroing' preallocated volume on NFS domain,
- # we create the target volume with minimal size and after
- # that w'll change its metadata back to the original size.
- if (volParams['volFormat'] == volume.COW_FORMAT
- or volParams['prealloc'] == volume.SPARSE_VOL):
- volTmpSize = volParams['size']
- else:
- volTmpSize = TEMPORARY_VOLUME_SIZE # in sectors (10M)
-
- destDom.createVolume(imgUUID=imgUUID, size=volTmpSize,
- volFormat=volParams['volFormat'],
- preallocate=volParams['prealloc'],
- diskType=volParams['disktype'],
- volUUID=srcVol.volUUID,
- desc=volParams['descr'],
- srcImgUUID=pimg,
- srcVolUUID=volParams['parent'])
-
- dstVol = destDom.produceVolume(imgUUID=imgUUID,
- volUUID=srcVol.volUUID)
-
- # Extend volume (for LV only) size to the actual size
- dstVol.extend((volParams['apparentsize'] + 511) / 512)
-
- # Change destination volume metadata back to the original
- # size.
- if volTmpSize != volParams['size']:
- dstVol.setSize(volParams['size'])
+ dstVol = self.__optimizedCreateVolume(
+ destDom, imgUUID, volParams['size'],
+ volParams['apparentsize'], volParams['volFormat'],
+ volParams['prealloc'], volParams['disktype'],
+ srcVol.volUUID, volParams['descr'], srcImgUUID=pimg,
+ srcVolUUID=volParams['parent'])
dstChain.append(dstVol)
except se.StorageException:
@@ -760,25 +769,14 @@
self.log.info("delete image %s on domain %s before overwriting", dstImgUUID, dstSdUUID)
self.delete(dstSdUUID, dstImgUUID, postZero, force=True)
- # To avoid 'prezeroing' preallocated volume on NFS domain,
- # we create the target volume with minimal size and after that w'll change
- # its metadata back to the original size.
- tmpSize = TEMPORARY_VOLUME_SIZE # in sectors (10M)
- destDom.createVolume(imgUUID=dstImgUUID, size=tmpSize,
- volFormat=dstVolFormat, preallocate=volParams['prealloc'],
- diskType=volParams['disktype'], volUUID=dstVolUUID, desc=descr,
- srcImgUUID=volume.BLANK_UUID, srcVolUUID=volume.BLANK_UUID)
+ dstVol = self.__optimizedCreateVolume(
+ destDom, dstImgUUID, volParams['size'],
+ volParams['apparentsize'], dstVolFormat,
+ volParams['prealloc'], volParams['disktype'],
+ dstVolUUID, descr, volume.BLANK_UUID,
+ volume.BLANK_UUID)
- dstVol = sdCache.produce(dstSdUUID).produceVolume(imgUUID=dstImgUUID, volUUID=dstVolUUID)
- # For convert to 'raw' we need use the virtual disk size instead of apparent size
- if dstVolFormat == volume.RAW_FORMAT:
- newsize = volParams['size']
- else:
- newsize = volParams['apparentsize']
- dstVol.extend(newsize)
dstPath = dstVol.getVolumePath()
- # Change destination volume metadata back to the original size.
- dstVol.setSize(volParams['size'])
except se.StorageException, e:
self.log.error("Unexpected error", exc_info=True)
raise
--
To view, visit http://gerrit.ovirt.org/8504
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I0fd90f85e9debf98bcac07d1b8d4b38c319c33f2
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Federico Simoncelli <fsimonce(a)redhat.com>
8 years, 1 month
Change in vdsm[master]: [WIP] BZ#844656 Release the lock during _findDomain
by Federico Simoncelli
Federico Simoncelli has uploaded a new change for review.
Change subject: [WIP] BZ#844656 Release the lock during _findDomain
......................................................................
[WIP] BZ#844656 Release the lock during _findDomain
Signed-off-by: Federico Simoncelli <fsimonce(a)redhat.com>
Change-Id: I8088d5fe716a3a08c3e5cef2d2d9a654ee96f60a
---
M vdsm/storage/sdc.py
1 file changed, 21 insertions(+), 7 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/22/6822/1
--
To view, visit http://gerrit.ovirt.org/6822
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I8088d5fe716a3a08c3e5cef2d2d9a654ee96f60a
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Federico Simoncelli <fsimonce(a)redhat.com>
8 years, 1 month
Change in vdsm[master]: dump the core of a domain
by shaohef@linux.vnet.ibm.com
ShaoHe Feng has uploaded a new change for review.
Change subject: dump the core of a domain
......................................................................
dump the core of a domain
libvirt support an API to dump the core of a domain on a given file for
analysis when guest OS crash.
There are two kinds of dump files. One is a QEMU suspend-to-disk image;
the other is a core file, which is like a kdump file but also contains the
registers' values.
It's helpful for VDSM to support this so the root cause can be found when a
guest hangs and kdump isn't set up in it. This would be a good RAS feature.
Here's the definition of the new API:
coreDump:
This method will dump the core of a domain on a given file for
analysis.
Input parameter:
vmId - VM UUID
to - the core file named by the user
flags - defined in libvirt.py
VIR_DUMP_CRASH
VIR_DUMP_LIVE
VIR_DUMP_BYPASS_CACHE
VIR_DUMP_RESET
VIR_DUMP_MEMORY_ONLY
Return value:
success: return doneCode
failure: return errCode including underlying libvirt error message.
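For illustration, the flags are plain libvirt bit values ORed together; a
caller asking for a live, memory-only dump would build them roughly like this
(a sketch assuming the libvirt python bindings; the vmId and target path are
only examples):

import libvirt

# Combine dump flags for a live, memory-only core dump; the verb forwards
# this integer to the underlying virDomainCoreDump() call.
flags = libvirt.VIR_DUMP_LIVE | libvirt.VIR_DUMP_MEMORY_ONLY

# Hypothetical client-side call -- vmCoreDump(vmId, to, flags) is the
# XML-RPC binding registered in BindingXMLRPC.py below:
# server.coreDump('77777777-ffff-3333-aaaa-222222222222',
#                 '/var/tmp/vm.core', flags)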
Change-Id: If4aac9e747dc7aa64a6ff5ef256a7a4375aa2bb5
Signed-off-by: ShaoHe Feng <shaohef(a)linux.vnet.ibm.com>
---
M vdsm/API.py
M vdsm/BindingXMLRPC.py
M vdsm/define.py
M vdsm/libvirtvm.py
M vdsm_cli/vdsClient.py
5 files changed, 80 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/29/7329/1
diff --git a/vdsm/API.py b/vdsm/API.py
index 19cbb42..e2b24cb 100644
--- a/vdsm/API.py
+++ b/vdsm/API.py
@@ -244,6 +244,12 @@
self.log.debug("Error creating VM", exc_info=True)
return errCode['unexpected']
+ def coreDump(self, to, flags):
+ v = self._cif.vmContainer.get(self._UUID)
+ if not v:
+ return errCode['noVM']
+ return v.coreDump(to, flags)
+
def desktopLock(self):
"""
Lock user session in guest operating system using guest agent.
diff --git a/vdsm/BindingXMLRPC.py b/vdsm/BindingXMLRPC.py
index cc5300f..be71e6a 100644
--- a/vdsm/BindingXMLRPC.py
+++ b/vdsm/BindingXMLRPC.py
@@ -208,6 +208,10 @@
vm = API.VM(vmId)
return vm.cont()
+ def vmCoreDump(self, vmId, to, flags):
+ vm = API.VM(vmId)
+ return vm.coreDump(to, flags)
+
def vmReset(self, vmId):
vm = API.VM(vmId)
return vm.reset()
@@ -725,6 +729,7 @@
(self.getVMList, 'list'),
(self.vmPause, 'pause'),
(self.vmCont, 'cont'),
+ (self.vmCoreDump, 'coreDump'),
(self.vmSnapshot, 'snapshot'),
(self.vmMerge, 'merge'),
(self.vmMergeStatus, 'mergeStatus'),
diff --git a/vdsm/define.py b/vdsm/define.py
index 31deb4f..1fedac5 100644
--- a/vdsm/define.py
+++ b/vdsm/define.py
@@ -114,6 +114,10 @@
'mergeErr': {'status':
{'code': 52,
'message': 'Merge failed'}},
+ 'coreDumpErr': {'status':
+ {'code': 54,
+ 'message':
+ 'Failed to get coreDump file'}},
'recovery': {'status':
{'code': 99,
'message':
diff --git a/vdsm/libvirtvm.py b/vdsm/libvirtvm.py
index 4554fee..cbd9f96 100644
--- a/vdsm/libvirtvm.py
+++ b/vdsm/libvirtvm.py
@@ -1904,6 +1904,27 @@
self.saveState()
+ def coreDump(self, to, flags):
+
+ def reportError(key='coreDumpErr', msg=None):
+ self.log.error("get coreDump failed", exc_info=True)
+ if msg is None:
+ error = errCode[key]
+ else:
+ error = {'status' : {'code': errCode[key] \
+ ['status']['code'], 'message': msg}}
+ return error
+
+ if self._dom is None:
+ return reportError()
+ try:
+ self._dom.coreDump(to, flags)
+ except libvirt.libvirtError, e:
+ if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
+ return reportError(key='noVM')
+ return reportError(msg=e.message)
+ return {'status': doneCode}
+
def changeCD(self, drivespec):
return self._changeBlockDev('cdrom', 'hdc', drivespec)
diff --git a/vdsm_cli/vdsClient.py b/vdsm_cli/vdsClient.py
index eeb7c95..cdcd3a8 100644
--- a/vdsm_cli/vdsClient.py
+++ b/vdsm_cli/vdsClient.py
@@ -1589,6 +1589,33 @@
return status['status']['code'], status['status']['message']
+ def coreDump(self, args):
+ DUMPFLAGS = {'crash': 1 << 0,
+ 'live': 1 << 1,
+ 'bypass-cache': 1 << 2,
+ 'reset': 1 << 3,
+ 'memory-only': 1 << 4}
+ flags = 0
+ vmId = args[0]
+ coreFile = args[1]
+ params = {}
+ if len(args) > 2:
+ for arg in args[2:]:
+ kv = arg.split('=', 1)
+ if len(kv) < 2:
+ params[kv[0]] = "True"
+ else:
+ params[kv[0]] = kv[1]
+ for k, v in params.items():
+ if v.lower() == "true" or not v:
+ try:
+ flags = flags + DUMPFLAGS[k]
+ except KeyError:
+ print "unrecognized optoin %s for cormDump command" % k
+ response = self.s.coreDump(vmId, coreFile, flags)
+ return response['status']['code'], response['status']['message']
+
+
if __name__ == '__main__':
if _glusterEnabled:
serv = ge.GlusterService()
@@ -2239,6 +2266,23 @@
('<vmId> <sdUUID> <imgUUID> <baseVolUUID> <volUUID>',
"Take a live snapshot"
)),
+ 'coreDump': (serv.coreDump,
+ ('<vmId> <file> [live=<True>] '
+ '[crash=<True>] [bypass-cache=<True>] '
+ '[reset=<True>] [memory-only=<True>]',
+ "get memeory dump or migration file"
+ 'optional params:',
+ 'crash: crash the domain after core dump'
+ 'default False',
+ 'live: perform a live core dump if supported, '
+ 'default False',
+ 'bypass-cache: avoid file system cache when saving'
+ 'default False',
+ 'reset: reset the domain after core dump'
+ 'default False',
+ "memory-only: dump domain's memory only"
+ 'default False'
+ )),
}
if _glusterEnabled:
commands.update(ge.getGlusterCmdDict(serv))
--
To view, visit http://gerrit.ovirt.org/7329
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: If4aac9e747dc7aa64a6ff5ef256a7a4375aa2bb5
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: ShaoHe Feng <shaohef(a)linux.vnet.ibm.com>
8 years, 1 month
Change in vdsm[master]: [WIP] Added glusterVolumeTop verb
by tjeyasin@redhat.com
Hello Ayal Baron, Bala.FA, Saggi Mizrahi, Federico Simoncelli, Dan Kenigsberg,
I'd like you to do a code review. Please visit
http://gerrit.ovirt.org/7844
to review the following change.
Change subject: [WIP] Added glusterVolumeTop verb
......................................................................
[WIP] Added glusterVolumeTop verb
Added glusterVolumeTopOpen verb
Added glusterVolumeTopRead verb
Added glusterVolumeTopWrite verb
Added glusterVolumeTopOpenDir verb
Added glusterVolumeTopReadDir verb
Added glusterVolumeTopReadPerf verb
Added glusterVolumeTopWritePerf verb
Following is the output structure of glusterVolumeTopOpen
{'statusCode' : CODE,
'brickCount': BRICK-COUNT,
'bricks': {BRICK-NAME: {'count':FILE-COUNT,
'currentOpenFds': CURRENT-OPEN-FDS-COUNT,
'maxOpen': MAX-OPEN,
'maxOpenTime': MAX-OPEN-TIME,
'files': [{FILE-NAME: FILE-OPEN-COUNT}, ...]
}, ...} }
Following is the output structure of glusterVolumeTopRead
{'statusCode': CODE,
'brickCount': BRICK-COUNT,
'topOp': TOP-OP,
'bricks': {BRICK-NAME: {
'count': FILE-COUNT,
'files': [{FILE-NAME: FILE-READ-COUNT}, ...]}
,...}}
Following is the output structure glusterVolumeTopWrite
{'statusCode' : CODE,
'brickCount': BRICK-COUNT,
'topOp': TOP-OP,
'bricks': {BRICK-NAME: {'count': FILE-COUNT,
'files': [{FILE-NAME: FILE-WRITE-COUNT}...]}
,...}}
Following is the output structure glusterVolumeTopOpenDir
{'statusCode': CODE,
'brickCount': BRICK-COUNT,
'topOp': TOP-OP,
'bricks': {BRICK-NAME: {'count':OPEN-DIR-COUNT,
'files': [{DIR-NAME: DIR-OPEN-COUNT}, ...]}
,...}}
Following is the output structure glusterVolumeTopReadDir
{'statusCode': CODE,
'brickCount': BRICK-COUNT,
'topOp': TOP-OP,
'bricks': {BRICK-NAME: {'count':READ-DIR-COUNT,
'files': [{DIR-NAME: DIR-READ-COUNT}, ...]}
,...}}
Following is the output structure glusterVolumeTopReadPerf
{'statusCode': CODE,
'brickCount': BRICK-COUNT,
'topOp': TOP-OP,
'bricks': {BRICK-NAME: {'fileCount':READ-COUNT,
'throughput': BRICK-WISE-READ-THROUGHPUT,
'timeTaken': TIME-TAKEN,
'files': [{FILE-NAME:
{'throughput':FILE-READ-THROUGHPUT,
'time': TIME}}, ...]}
,...}}
Following is the output structure glusterVolumeTopWritePerf
{'statusCode': CODE,
'brickCount': BRICK-COUNT,
'topOp': TOP-OP,
'bricks': {BRICK-NAME: {'fileCount':WRITE-COUNT,
'throughput': BRICK-WISE-WRITE-THROUGHPUT,
'timeTaken': TIME-TAKEN,
'files': [{FILE-NAME:
{'throughput':FILE-WRITE-THROUGHPUT,
'time': TIME}}, ...]}
,...}}
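As a concrete illustration of the structures above, a parsed
glusterVolumeTopRead result might look like this (all brick names, paths and
numbers are invented):

# Hypothetical parsed result, following the documented structure.
topRead = {
    'statusCode': '0',
    'brickCount': '1',
    'topOp': 'read',
    'bricks': {
        'server1:/export/brick1': {
            'count': '2',
            'files': [{'/vm1.img': '1024'},
                      {'/vm2.img': '512'}]}}}

# Walking the per-brick read counts:
for brick, info in topRead['bricks'].items():
    for entry in info['files']:
        for path, reads in entry.items():
            print '%s %s reads=%s' % (brick, path, reads)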
Change-Id: I96486363a9acb7472014a67fcd2d5185d4f3c428
Signed-off-by: Timothy Asir <tjeyasin(a)redhat.com>
---
M vdsm/gluster/api.py
M vdsm/gluster/cli.py
M vdsm/gluster/exception.py
M vdsm_cli/vdsClientGluster.py
4 files changed, 372 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/44/7844/1
diff --git a/vdsm/gluster/api.py b/vdsm/gluster/api.py
index e52430b..3f493e0 100644
--- a/vdsm/gluster/api.py
+++ b/vdsm/gluster/api.py
@@ -241,6 +241,61 @@
status = self.svdsmProxy.glusterVolumeProfileInfo(volumeName)
return {'profileInfo': status}
+ @exportAsVerb
+ def volumeTopOpen(self, volumeName, brickName=None, count=None,
+ options=None):
+ status = self.svdsmProxy.glusterVolumeTopOpen(volumeName,
+ brickName, count)
+ return {'topOpen': status}
+
+ @exportAsVerb
+ def volumeTopRead(self, volumeName, brickName=None, count=None,
+ options=None):
+ status = self.svdsmProxy.glusterVolumeTopRead(volumeName,
+ brickName, count)
+ return {'topRead': status}
+
+ @exportAsVerb
+ def volumeTopWrite(self, volumeName, brickName=None, count=None,
+ options=None):
+ status = self.svdsmProxy.glusterVolumeTopWrite(volumeName,
+ brickName, count)
+ return {'topWrite': status}
+
+ @exportAsVerb
+ def volumeTopOpenDir(self, volumeName, brickName=None, count=None,
+ options=None):
+ status = self.svdsmProxy.glusterVolumeTopOpenDir(volumeName,
+ brickName, count)
+ return {'topOpenDir': status}
+
+ @exportAsVerb
+ def volumeTopWriteDir(self, volumeName, brickName=None, count=None,
+ options=None):
+ status = self.svdsmProxy.glusterVolumeTopWriteDir(volumeName,
+ brickName, count)
+ return {'topWriteDir': status}
+
+ @exportAsVerb
+ def volumeTopReadPerf(self, volumeName, blockSize=None, count=None,
+ brickName=None, listCount=None, options=None):
+ status = self.svdsmProxy.glusterVolumeTopReadPerf(volumeName,
+ blockSize,
+ count,
+ brickName,
+ listCount)
+ return {'topReadPerf': status}
+
+ @exportAsVerb
+ def volumeTopWritePerf(self, volumeName, blockSize=None, count=None,
+ brickName=None, listCount=None, options=None):
+ status = self.svdsmProxy.glusterVolumeTopWritePerf(volumeName,
+ blockSize,
+ count,
+ brickName,
+ listCount)
+ return {'topWritePerf': status}
+
def getGlusterMethods(gluster):
l = []
diff --git a/vdsm/gluster/cli.py b/vdsm/gluster/cli.py
index b91a04f..ba4768c 100644
--- a/vdsm/gluster/cli.py
+++ b/vdsm/gluster/cli.py
@@ -334,6 +334,66 @@
return volumeInfoDict
+def _parseGlusterVolumeTopOpen(tree):
+ bricks = {}
+ for brick in tree.findall('volTop/brick'):
+ fileList = []
+ for file in brick.findall('file'):
+ fileList.append({file.find('filename').text:
+ file.find('count').text})
+ bricks[brick.find('name').text] = {
+ 'count': brick.find('members').text,
+ 'currentOpen': brick.find('currentOpen').text,
+ 'maxOpen': brick.find('maxOpen').text,
+ 'maxOpenTime': brick.find('maxOpenTime').text,
+ 'files': fileList}
+ status = {
+ 'topOp': tree.find('volTop/topOp').text,
+ 'brickCount': tree.find('volTop/brickCount').text,
+ 'statusCode': tree.find('opRet').text,
+ 'bricks': bricks}
+ return status
+
+
+def _parseGlusterVolumeTop(tree):
+ bricks = {}
+ for brick in tree.findall('volTop/brick'):
+ fileList = []
+ for fileTag in brick.findall('file'):
+ fileList.append({fileTag.find('filename').text:
+ fileTag.find('count').text})
+ bricks[brick.find('name').text] = {
+ 'count': brick.find('members').text,
+ 'files': fileList}
+ status = {
+ 'topOp': tree.find('volTop/topOp').text,
+ 'brickCount': tree.find('volTop/brickCount').text,
+ 'statusCode': tree.find('opRet').text,
+ 'bricks': bricks}
+ return status
+
+
+def _parseGlusterVolumeTopPerf(tree):
+ bricks = {}
+ for brick in tree.findall('volTop/brick'):
+ fileList = []
+ for fileTag in brick.findall('file'):
+ fileList.append({fileTag.find('filename').text:
+ {'count': fileTag.find('count').text,
+ 'time': fileTag.find('time').text}})
+ bricks[brick.find('name').text] = {
+ 'count': brick.find('members').text,
+ 'throughput': brick.find('throughput').text,
+ 'timeTaken': brick.find('timeTaken').text,
+ 'files': fileList}
+ status = {
+ 'topOp': tree.find('volTop/topOp').text,
+ 'brickCount': tree.find('volTop/brickCount').text,
+ 'statusCode': tree.find("opRet").text,
+ 'bricks': bricks}
+ return status
+
+
def _parseGlusterVolumeProfileInfo(tree):
bricks = {}
for brick in tree.findall('volProfile/brick'):
@@ -819,3 +879,132 @@
return _parseGlusterVolumeProfileInfo(xmltree)
except:
raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopOpen(volumeName, brickName=None, count=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "open"]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if count:
+ command += ["list-cnt", "%s" % count]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopOpenFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTopOpen(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopRead(volumeName, brickName=None, count=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "read"]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if count:
+ command += ["list-cnt", "%s" % count]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopReadFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTop(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopWrite(volumeName, brickName=None, count=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "write"]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if count:
+ command += ["list-cnt", "%s" % count]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopWriteFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTop(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopOpenDir(volumeName, brickName=None, count=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "opendir"]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if count:
+ command += ["list-cnt", "%s" % count]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopOpenDirFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTop(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopReadDir(volumeName, brickName=None, count=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "readdir"]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if count:
+ command += ["list-cnt", "%s" % count]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopReadDirFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTop(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopReadPerf(volumeName, blockSize=None, count=None,
+ brickName=None, listCount=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "read-perf"]
+ if blockSize:
+ command += ["bs", "%s" % blockSize]
+ if count:
+ command += ["count", "%s" % count]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if listCount:
+ command += ["list-cnt", "%s" % listCount]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopReadPerfFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTopPerf(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopWritePerf(volumeName, blockSize=None, count=None,
+ brickName=None, listCount=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "write-perf"]
+ if blockSize:
+ command += ["bs", "%s" % blockSize]
+ if count:
+ command += ["count", "%s" % count]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if listCount:
+ command += ["list-cnt", "%s" % listCount]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopWritePerfFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTopPerf(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
diff --git a/vdsm/gluster/exception.py b/vdsm/gluster/exception.py
index bc20dd0..b392ec8 100644
--- a/vdsm/gluster/exception.py
+++ b/vdsm/gluster/exception.py
@@ -343,6 +343,41 @@
message = "Volume profile info failed"
+class GlusterVolumeTopOpenFailedException(GlusterVolumeException):
+ code = 4161
+ message = "Volume top open failed"
+
+
+class GlusterVolumeTopReadFailedException(GlusterVolumeException):
+ code = 4162
+ message = "Volume top read failed"
+
+
+class GlusterVolumeTopWriteFailedException(GlusterVolumeException):
+ code = 4163
+ message = "Volume top write failed"
+
+
+class GlusterVolumeTopOpenDirFailedException(GlusterVolumeException):
+ code = 4164
+ message = "Volume top open dir failed"
+
+
+class GlusterVolumeTopReadDirFailedException(GlusterVolumeException):
+ code = 4165
+ message = "Volume top read dir failed"
+
+
+class GlusterVolumeTopReadPerfFailedException(GlusterVolumeException):
+ code = 4166
+ message = "Volume top read perf failed"
+
+
+class GlusterVolumeTopWritePerfFailedException(GlusterVolumeException):
+ code = 4167
+ message = "Volume top write perf failed"
+
+
# Host
class GlusterHostException(GlusterException):
code = 4400
diff --git a/vdsm_cli/vdsClientGluster.py b/vdsm_cli/vdsClientGluster.py
index 8422695..3663c63 100644
--- a/vdsm_cli/vdsClientGluster.py
+++ b/vdsm_cli/vdsClientGluster.py
@@ -221,6 +221,41 @@
pp.pprint(status)
return status['status']['code'], status['status']['message']
+ def do_glusterVolumeTopOpen(self, args):
+ status = self.s.glusterVolumeTopOpen(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeTopRead(self, args):
+ status = self.s.glusterVolumeTopRead(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeTopWrite(self, args):
+ status = self.s.glusterVolumeTopWrite(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeTopOpenDir(self, args):
+ status = self.s.glusterVolumeTopOpenDir(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeTopReadDir(self, args):
+ status = self.s.glusterVolumeTopReadDir(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeTopReadPerf(self, args):
+ status = self.s.glusterVolumeTopReadPerf(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeTopWritePerf(self, args):
+ status = self.s.glusterVolumeTopWritePerf(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
def getGlusterCmdDict(serv):
return {
@@ -403,4 +438,62 @@
('<volume_name>\n\t<volume_name> is existing volume name',
'get gluster volume profile info'
)),
+ 'glusterVolumeTopOpen':
+ (serv.do_glusterVolumeTopOpen,
+ ('<volume_name> [brick=<existing_brick>] '
+ '[count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get volume top open fd count and maximum fd count of '
+ 'a given volume with its all brick or specified brick'
+ )),
+ 'glusterVolumeTopRead':
+ (serv.do_glusterVolumeTopRead,
+ ('<volume_name> [brick=<existing_brick>] '
+ '[count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get list of highest read calls on each brick or '
+ 'a specified brick of a volume'
+ )),
+ 'glusterVolumeTopWrite':
+ (serv.do_glusterVolumeTopWrite,
+ ('<volume_name> [brick=<existing_brick>] '
+ '[count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get list of highest write calls on each brick or '
+ 'a specified brick of a volume'
+ )),
+ 'glusterVolumeTopOpenDir':
+ (serv.do_glusterVolumeTopOpenDir,
+ ('<volume_name> [brick=<existing_brick>] '
+ '[count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get list of highest open calls on directories of each brick '
+ 'or a specified brick of a volume'
+ )),
+ 'glusterVolumeTopReadDir':
+ (serv.do_glusterVolumeTopReadDir,
+ ('<volume_name> [brick=<existing_brick>] '
+ '[count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get list of highest read calls on directories of each brick '
+ 'or a specified brick of a volume'
+ )),
+ 'glusterVolumeTopReadPerf':
+ (serv.do_glusterVolumeTopReadPerf,
+ ('<volume_name> [block_size=<block_size>] '
+ '[count=<count>] [brick=<existing_brick>] '
+ '[list_count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get list of read throughput of files on bricks. '
+ 'if the block size and the count is not specified, '
+ 'it will give the output based on historical data'
+ )),
+ 'glusterVolumeTopWritePerf':
+ (serv.do_glusterVolumeTopWritePerf,
+ ('<volume_name> [block_size=<block_size>] '
+ '[count=<count>] [brick=<existing_brick>] '
+ '[list_count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get list of write throughput of files on bricks'
+ )),
}
--
To view, visit http://gerrit.ovirt.org/7844
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I96486363a9acb7472014a67fcd2d5185d4f3c428
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Timothy Asir <tjeyasin(a)redhat.com>
Gerrit-Reviewer: Ayal Baron <abaron(a)redhat.com>
Gerrit-Reviewer: Bala.FA <barumuga(a)redhat.com>
Gerrit-Reviewer: Dan Kenigsberg <danken(a)redhat.com>
Gerrit-Reviewer: Federico Simoncelli <fsimonce(a)redhat.com>
Gerrit-Reviewer: Saggi Mizrahi <smizrahi(a)redhat.com>
8 years, 1 month
Change in vdsm[master]: Added gluster tag support in getAllTasks()
by barumuga@redhat.com
Hello Ayal Baron, Timothy Asir, Saggi Mizrahi, Federico Simoncelli, Dan Kenigsberg,
I'd like you to do a code review. Please visit
http://gerrit.ovirt.org/7579
to review the following change.
Change subject: Added gluster tag support in getAllTasks()
......................................................................
Added gluster tag support in getAllTasks()
If the tag param is empty, all tasks including gluster tasks are returned;
otherwise only the tasks whose tags are in the tag list are returned (a
usage sketch follows the verb list below).
As the verbs below are not consumed by engine/RHS-C yet, it's OK to defer
the compatibility issue for now.
glusterVolumeRebalanceStart
glusterVolumeRebalanceStatus
glusterVolumeReplaceBrickStart
glusterVolumeReplaceBrickStatus
glusterVolumeRemoveBrickStart
glusterVolumeRemoveBrickStatus
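For illustration, the intended call pattern and the rough shape of a returned
gluster task entry (taskMng stands for the TaskManager instance; field names
follow _getAllGlusterTasks() in the hunk below, all values are invented):

glusterTasks = taskMng.getAllTasks(tag=['gluster'])

# A returned entry, keyed by the gluster task id, looks roughly like:
# {'12345473-9197-49d0-a877-831bc6e9dac2': {
#      'id': '12345473-9197-49d0-a877-831bc6e9dac2',
#      'verb': 'V1',             # volume name
#      'state': 'RUNNING',
#      'code': 'rebalance',      # gluster task type
#      'message': 'Files [scanned: 100, moved: 90, failed: 0], '
#                 'Total size moved: 1048576',
#      'result': '',
#      'tag': 'gluster'}}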
Change-Id: I9c765cbfebb5ba22f0d21efa04c824ea4daf6432
Signed-off-by: Bala.FA <barumuga(a)redhat.com>
---
M tests/gluster_cli_tests.py
M vdsm/gluster/cli.py
M vdsm/gluster/exception.py
M vdsm/storage/taskManager.py
4 files changed, 367 insertions(+), 95 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/79/7579/1
diff --git a/tests/gluster_cli_tests.py b/tests/gluster_cli_tests.py
index f442893..9c6357c 100644
--- a/tests/gluster_cli_tests.py
+++ b/tests/gluster_cli_tests.py
@@ -28,6 +28,7 @@
from gluster import cli as gcli
except ImportError:
pass
+import xml.etree.cElementTree as etree
class GlusterCliTests(TestCaseBase):
@@ -115,3 +116,74 @@
def test_parsePeerStatus(self):
self._parsePeerStatus_empty_test()
self._parsePeerStatus_test()
+
+ def _parseVolumeStatusAll_test(self):
+ out = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<cliOutput>
+ <opRet>0</opRet>
+ <opErrno>0</opErrno>
+ <opErrstr></opErrstr>
+ <volumes>
+ <volume>
+ <name>V1</name>
+ <id>03eace73-9197-49d0-a877-831bc6e9dac2</id>
+ <tasks>
+ <task>
+ <name>rebalance</name>
+ <id>12345473-9197-49d0-a877-831bc6e9dac2</id>
+ </task>
+ </tasks>
+ </volume>
+ <volume>
+ <name>V2</name>
+ <id>03eace73-1237-49d0-a877-831bc6e9dac2</id>
+ <tasks>
+ <task>
+ <name>replace-brick</name>
+ <id>12345473-1237-49d0-a877-831bc6e9dac2</id>
+ <sourceBrick>192.168.122.167:/tmp/V2-b1</sourceBrick>
+ <destBrick>192.168.122.168:/tmp/V2-b1</destBrick>
+ </task>
+ </tasks>
+ </volume>
+ <volume>
+ <name>V3</name>
+ <id>03eace73-1237-1230-a877-831bc6e9dac2</id>
+ <tasks>
+ <task>
+ <name>remove-brick</name>
+ <id>12345473-1237-1230-a877-831bc6e9dac2</id>
+ <BrickCount>2</BrickCount>
+ <brick>192.168.122.167:/tmp/V3-b1</brick>
+ <brick>192.168.122.168:/tmp/V3-b1</brick>
+ </task>
+ </tasks>
+ </volume>
+ </volumes>
+</cliOutput>"""
+ tree = etree.fromstring(out)
+ status = gcli._parseVolumeStatusAll(tree)
+ self.assertEquals(status,
+ {'12345473-1237-1230-a877-831bc6e9dac2':
+ {'bricks': ['192.168.122.167:/tmp/V3-b1',
+ '192.168.122.168:/tmp/V3-b1'],
+ 'taskType': 'remove-brick',
+ 'volumeId':
+ '03eace73-1237-1230-a877-831bc6e9dac2',
+ 'volumeName': 'V3'},
+ '12345473-1237-49d0-a877-831bc6e9dac2':
+ {'bricks': ['192.168.122.167:/tmp/V2-b1',
+ '192.168.122.168:/tmp/V2-b1'],
+ 'taskType': 'replace-brick',
+ 'volumeId':
+ '03eace73-1237-49d0-a877-831bc6e9dac2',
+ 'volumeName': 'V2'},
+ '12345473-9197-49d0-a877-831bc6e9dac2':
+ {'bricks': [],
+ 'taskType': 'rebalance',
+ 'volumeId':
+ '03eace73-9197-49d0-a877-831bc6e9dac2',
+ 'volumeName': 'V1'}})
+
+ def test_parseVolumeStatusAll(self):
+ self._parseVolumeStatusAll_test()
diff --git a/vdsm/gluster/cli.py b/vdsm/gluster/cli.py
index 95de106..1f464f6 100644
--- a/vdsm/gluster/cli.py
+++ b/vdsm/gluster/cli.py
@@ -84,6 +84,55 @@
raise ge.GlusterCmdFailedException(rc=rv, err=[msg])
+class TaskType:
+ REBALANCE = 'rebalance'
+ REPLACE_BRICK = 'replace-brick'
+ REMOVE_BRICK = 'remove-brick'
+
+
+def _parseVolumeStatusAll(tree):
+ """
+ returns {TaskId: {'volumeName': VolumeName,
+ 'volumeId': VolumeId,
+ 'taskType': TaskType,
+ 'bricks': BrickList}, ...}
+ """
+ tasks = {}
+ for el in tree.findall('volumes/volume'):
+ volumeName = el.find('name').text
+ volumeId = el.find('id').text
+ for c in el.findall('tasks/task'):
+ taskType = c.find('name').text
+ taskId = c.find('id').text
+ bricks = []
+ if taskType == TaskType.REPLACE_BRICK:
+ bricks.append(c.find('sourceBrick').text)
+ bricks.append(c.find('destBrick').text)
+ elif taskType == TaskType.REMOVE_BRICK:
+ for b in c.findall('brick'):
+ bricks.append(b.text)
+ elif taskType == TaskType.REBALANCE:
+ pass
+ tasks[taskId] = {'volumeName': volumeName,
+ 'volumeId': volumeId,
+ 'taskType': taskType,
+ 'bricks': bricks}
+ return tasks
+
+
+@exportToSuperVdsm
+def volumeStatusAll():
+ command = _getGlusterVolCmd() + ["status", "all"]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeStatusAllFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseVolumeStatusAll(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
+
+
def _parseVolumeInfo(out):
if not out[0].strip():
del out[0]
@@ -300,11 +349,15 @@
command.append("start")
if force:
command.append("force")
- rc, out, err = _execGluster(command)
- if rc:
- raise ge.GlusterVolumeRebalanceStartFailedException(rc, out, err)
- else:
- return True
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeRebalanceStartFailedException(rc=e.rc,
+ err=e.err)
+ try:
+ return {'taskId': xmltree.find('id').text}
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
@exportToSuperVdsm
@@ -312,84 +365,147 @@
command = _getGlusterVolCmd() + ["rebalance", volumeName, "stop"]
if force:
command.append('force')
- rc, out, err = _execGluster(command)
- if rc:
- raise ge.GlusterVolumeRebalanceStopFailedException(rc, out, err)
- else:
+ try:
+ _execGlusterXml(command)
return True
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeRebalanceStopFailedException(rc=e.rc,
+ err=e.err)
+
+
+class TaskStatus():
+ RUNNING = 'RUNNING'
+ FAILED = 'FAILED'
+ COMPLETED = 'COMPLETED'
+
+
+def _parseVolumeRebalanceRemoveBrickStatus(xmltree, mode):
+ """
+ returns {'taskId': UUID,
+ 'host': [{'name': NAME,
+ 'id': HOSTID,
+ 'filesScanned': INT,
+ 'filesMoved': INT,
+ 'filesFailed': INT,
+ 'totalSizeMoved': INT,
+ 'status': TaskStatus},...]
+ 'summary': {'filesScanned': INT,
+ 'filesMoved': INT,
+ 'filesFailed': INT,
+ 'totalSizeMoved': INT,
+ 'status': TaskStatus}}
+ """
+ if mode == 'rebalance':
+ tree = xmltree.find('volRebalance')
+ elif mode == 'remove-brick':
+ tree = xmltree.find('volRemoveBrick')
+ else:
+ return
+ status = \
+ {'taskId': tree.find('id').text,
+ 'summary': \
+ {'filesScanned': int(tree.find('summary/filesScanned').text),
+ 'filesMoved': int(tree.find('summary/filesMoved').text),
+ 'filesFailed': int(tree.find('summary/filesFailed').text),
+ 'totalSizeMoved': int(tree.find('summary/totalSizeMoved').text),
+ 'status': tree.find('summary/status').text},
+ 'host': []}
+ for el in tree.findall('node'):
+ status['host'].append({'name': el.find('name').text,
+ 'id': el.find('id').text,
+ 'filesScanned':
+ int(el.find('filesScanned').text),
+ 'filesMoved': int(el.find('filesMoved').text),
+ 'filesFailed': int(el.find('filesFailed').text),
+ 'totalSizeMoved':
+ int(el.find('totalSizeMoved').text),
+ 'status': el.find('status').text})
+ return status
+
+
+def _parseVolumeRebalanceStatus(tree):
+ return _parseVolumeRebalanceRemoveBrickStatus(tree, 'rebalance')
@exportToSuperVdsm
def volumeRebalanceStatus(volumeName):
- rc, out, err = _execGluster(_getGlusterVolCmd() + ["rebalance", volumeName,
- "status"])
- if rc:
- raise ge.GlusterVolumeRebalanceStatusFailedException(rc, out, err)
- if 'in progress' in out[0]:
- return BrickStatus.RUNNING, "\n".join(out)
- elif 'complete' in out[0]:
- return BrickStatus.COMPLETED, "\n".join(out)
- else:
- return BrickStatus.UNKNOWN, "\n".join(out)
+ command = _getGlusterVolCmd() + ["rebalance", volumeName, "status"]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeRebalanceStatusFailedException(rc=e.rc,
+ err=e.err)
+ try:
+ return _parseVolumeRebalanceStatus(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
@exportToSuperVdsm
def volumeReplaceBrickStart(volumeName, existingBrick, newBrick):
- rc, out, err = _execGluster(_getGlusterVolCmd() + ["replace-brick",
- volumeName,
- existingBrick, newBrick,
- "start"])
- if rc:
- raise ge.GlusterVolumeReplaceBrickStartFailedException(rc, out, err)
- else:
- return True
+ command = _getGlusterVolCmd() + ["replace-brick", volumeName,
+ existingBrick, newBrick, "start"]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeReplaceBrickStartFailedException(rc=e.rc,
+ err=e.err)
+ try:
+ return {'taskId': xmltree.find('id').text}
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
@exportToSuperVdsm
def volumeReplaceBrickAbort(volumeName, existingBrick, newBrick):
- rc, out, err = _execGluster(_getGlusterVolCmd() + ["replace-brick",
- volumeName,
- existingBrick, newBrick,
- "abort"])
- if rc:
- raise ge.GlusterVolumeReplaceBrickAbortFailedException(rc, out, err)
- else:
+ command = _getGlusterVolCmd() + ["replace-brick", volumeName,
+ existingBrick, newBrick, "abort"]
+ try:
+ _execGlusterXml(command)
return True
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeReplaceBrickAbortFailedException(rc=e.rc,
+ err=e.err)
@exportToSuperVdsm
def volumeReplaceBrickPause(volumeName, existingBrick, newBrick):
- rc, out, err = _execGluster(_getGlusterVolCmd() + ["replace-brick",
- volumeName,
- existingBrick, newBrick,
- "pause"])
- if rc:
- raise ge.GlusterVolumeReplaceBrickPauseFailedException(rc, out, err)
- else:
+ command = _getGlusterVolCmd() + ["replace-brick", volumeName,
+ existingBrick, newBrick, "pause"]
+ try:
+ _execGlusterXml(command)
return True
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeReplaceBrickPauseFailedException(rc=e.rc,
+ err=e.err)
+
+
+def _parseVolumeReplaceBrickStatus(tree):
+ """
+ returns {'taskId': UUID,
+ 'filesMoved': INT,
+ 'movingFile': STRING,
+ 'status': TaskStatus}}
+ """
+ return {'taskId': tree.find('volReplaceBrick/id').text,
+ 'filesMoved': int(tree.find('volReplaceBrick/filesMoved').text),
+ 'movingFile': tree.find('volReplaceBrick/movingFile').text,
+ 'status': tree.find('volReplaceBrick/status').text}
@exportToSuperVdsm
def volumeReplaceBrickStatus(volumeName, existingBrick, newBrick):
- rc, out, err = _execGluster(_getGlusterVolCmd() + ["replace-brick",
- volumeName,
- existingBrick, newBrick,
- "status"])
- if rc:
- raise ge.GlusterVolumeReplaceBrickStatusFailedException(rc, out,
- err)
- message = "\n".join(out)
- statLine = out[0].strip().upper()
- if BrickStatus.PAUSED in statLine:
- return BrickStatus.PAUSED, message
- elif statLine.endswith('MIGRATION COMPLETE'):
- return BrickStatus.COMPLETED, message
- elif statLine.startswith('NUMBER OF FILES MIGRATED'):
- return BrickStatus.RUNNING, message
- elif statLine.endswith("UNKNOWN"):
- return BrickStatus.UNKNOWN, message
- else:
- return BrickStatus.NA, message
+ command = _getGlusterVolCmd() + ["replace-brick", volumeName,
+ existingBrick, newBrick, "status"]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeReplaceBrickStatusFailedException(rc=e.rc,
+ err=e.err)
+ try:
+ return _parseVolumeReplaceBrickStatus(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
@exportToSuperVdsm
@@ -399,12 +515,12 @@
existingBrick, newBrick, "commit"]
if force:
command.append('force')
- rc, out, err = _execGluster(command)
- if rc:
- raise ge.GlusterVolumeReplaceBrickCommitFailedException(rc, out,
- err)
- else:
+ try:
+ _execGlusterXml(command)
return True
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeReplaceBrickCommitFailedException(rc=e.rc,
+ err=e.err)
@exportToSuperVdsm
@@ -413,12 +529,15 @@
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["start"]
-
- rc, out, err = _execGluster(command)
- if rc:
- raise ge.GlusterVolumeRemoveBrickStartFailedException(rc, out, err)
- else:
- return True
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeRemoveBrickStartFailedException(rc=e.rc,
+ err=e.err)
+ try:
+ return {'taskId': xmltree.find('id').text}
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
@exportToSuperVdsm
@@ -427,12 +546,16 @@
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["stop"]
- rc, out, err = _execGluster(command)
-
- if rc:
- raise ge.GlusterVolumeRemoveBrickStopFailedException(rc, out, err)
- else:
+ try:
+ _execGlusterXml(command)
return True
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeRemoveBrickStopFailedException(rc=e.rc,
+ err=e.err)
+
+
+def _parseVolumeRemoveBrickStatus(tree):
+ return _parseVolumeRebalanceRemoveBrickStatus(tree, 'remove-brick')
@exportToSuperVdsm
@@ -441,12 +564,15 @@
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["status"]
- rc, out, err = _execGluster(command)
-
- if rc:
- raise ge.GlusterVolumeRemoveBrickStatusFailedException(rc, out, err)
- else:
- return "\n".join(out)
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeRemoveBrickStatusFailedException(rc=e.rc,
+ err=e.err)
+ try:
+ return _parseVolumeRemoveBrickStatus(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
@exportToSuperVdsm
@@ -455,12 +581,12 @@
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["commit"]
- rc, out, err = _execGluster(command)
-
- if rc:
- raise ge.GlusterVolumeRemoveBrickCommitFailedException(rc, out, err)
- else:
+ try:
+ _execGlusterXml(command)
return True
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeRemoveBrickCommitFailedException(rc=e.rc,
+ err=e.err)
@exportToSuperVdsm
@@ -469,12 +595,12 @@
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["force"]
- rc, out, err = _execGluster(command)
-
- if rc:
- raise ge.GlusterVolumeRemoveBrickForceFailedException(rc, out, err)
- else:
+ try:
+ _execGlusterXml(command)
return True
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeRemoveBrickForceFailedException(rc=e.rc,
+ err=e.err)
@exportToSuperVdsm
diff --git a/vdsm/gluster/exception.py b/vdsm/gluster/exception.py
index f4f497b..f209885 100644
--- a/vdsm/gluster/exception.py
+++ b/vdsm/gluster/exception.py
@@ -323,6 +323,11 @@
message = "Volume remove brick force failed"
+class GlusterVolumeStatusAllFailedException(GlusterVolumeException):
+ code = 4158
+ message = "Volume status all failed"
+
+
# Host
class GlusterHostException(GlusterException):
code = 4400
diff --git a/vdsm/storage/taskManager.py b/vdsm/storage/taskManager.py
index 3bc12f3..0a269cd 100644
--- a/vdsm/storage/taskManager.py
+++ b/vdsm/storage/taskManager.py
@@ -25,6 +25,12 @@
import storage_exception as se
from task import Task, Job, TaskCleanType
from threadPool import ThreadPool
+try:
+ from gluster import cli as gcli
+ from gluster import exception as ge
+ _glusterEnabled = True
+except ImportError:
+ _glusterEnabled = False
class TaskManager:
@@ -113,19 +119,82 @@
self.log.debug("Return: %s", subRes)
return subRes
- def getAllTasks(self):
+ def _getAllGlusterTasks(self):
"""
- Return Tasks for all public tasks.
+ Return all gluster tasks
+ """
+ subRes = {}
+ if not _glusterEnabled:
+ return subRes
+
+ for taskId, value in gcli.volumeStatusAll().iteritems():
+ msg = ''
+ state = ''
+ try:
+ if value['taskType'] == gcli.TaskType.REBALANCE:
+ status = gcli.volumeRebalanceStatus(value['volumeName'])
+ msg = ('Files [scanned: %d, moved: %d, failed: %d], '
+ 'Total size moved: %d') % \
+ (status['summary']['filesScanned'],
+ status['summary']['filesMoved'],
+ status['summary']['filesFailed'],
+ status['summary']['totalSizeMoved'])
+ state = status['summary']['status']
+ elif value['taskType'] == gcli.TaskType.REMOVE_BRICK:
+ status = gcli.volumeRemoveBrickStatus(value['volumeName'],
+ value['bricks'])
+ msg = ('Files [scanned: %d, moved: %d, failed: %d], '
+ 'Total size moved: %d') % \
+ (status['summary']['filesScanned'],
+ status['summary']['filesMoved'],
+ status['summary']['filesFailed'],
+ status['summary']['totalSizeMoved'])
+ state = status['summary']['status']
+ elif value['taskType'] == gcli.TaskType.REPLACE_BRICK:
+ status = gcli.volumeReplaceBrickStatus(value['volumeName'],
+ value['bricks'][0],
+ value['bricks'][1])
+ msg = 'Files moved: %d, Moving file: %s' % \
+ (status['filesMoved'], status['movingFile'])
+ state = status['status']
+ except ge.GlusterException:
+ self.log.error("gluster exception occured", exc_info=True)
+
+ subRes[taskId] = {"id": taskId,
+ "verb": value['volumeName'],
+ "state": state,
+ "code": value['taskType'],
+ "message": msg,
+ "result": '',
+ "tag": 'gluster'}
+ return subRes
+
+ def getAllTasks(self, tag=[]):
+ """
+ Return details of all public tasks if the tag param is empty,
+ else return only the tasks whose tags appear in the tag list.
"""
self.log.debug("Entry.")
subRes = {}
for taskID, task in self._tasks.items():
try:
- subRes[taskID] = task.getDetails()
+ if not tag:
+ subRes[taskID] = task.getDetails()
+ elif task.getTags() in tag:
+ subRes[taskID] = task.getDetails()
except se.UnknownTask:
# Return info for existing tasks only.
self.log.warn("Unknown task %s. Maybe task was already "
"cleared.", taskID)
+
+ try:
+ if not tag:
+ subRes.update(self._getAllGlusterTasks())
+ elif 'gluster' in tag:
+ subRes.update(self._getAllGlusterTasks())
+ except ge.GlusterException:
+ self.log.error("gluster exception occured", exc_info=True)
+
self.log.debug("Return: %s", subRes)
return subRes
--
To view, visit http://gerrit.ovirt.org/7579
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I9c765cbfebb5ba22f0d21efa04c824ea4daf6432
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Bala.FA <barumuga(a)redhat.com>
Gerrit-Reviewer: Ayal Baron <abaron(a)redhat.com>
Gerrit-Reviewer: Dan Kenigsberg <danken(a)redhat.com>
Gerrit-Reviewer: Federico Simoncelli <fsimonce(a)redhat.com>
Gerrit-Reviewer: Saggi Mizrahi <smizrahi(a)redhat.com>
Gerrit-Reviewer: Timothy Asir <tjeyasin(a)redhat.com>
8 years, 1 month
Change in vdsm[master]: add xmlrpcTests for cpu pinning
by lvroyce@linux.vnet.ibm.com
Royce Lv has uploaded a new change for review.
Change subject: add xmlrpcTests for cpu pinning
......................................................................
add xmlrpcTests for cpu pinning
Change-Id: Ia865f0d5eb4c9aabff6cef57b088c55df73a309e
Signed-off-by: Royce Lv<lvroyce(a)linux.vnet.ibm.com>
---
M tests/functional/xmlrpcTests.py
M tests/vdsClientTests.py
2 files changed, 40 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/12/8412/1
diff --git a/tests/functional/xmlrpcTests.py b/tests/functional/xmlrpcTests.py
index 9c865db..2684d0f 100644
--- a/tests/functional/xmlrpcTests.py
+++ b/tests/functional/xmlrpcTests.py
@@ -174,3 +174,33 @@
destroyResult = self.s.destroy(VMID)
self.assertVdsOK(destroyResult)
+
+ def testCpuPin(self):
+ self.skipNoKVM()
+
+ def assertVMAndGuestUp():
+ self.assertVmUp(VMID)
+ self.assertGuestUp(VMID)
+
+ VMID = '77777777-ffff-3333-aaaa-222222222222'
+
+ with kernelBootImages() as (kernelPath, initramfsPath):
+ conf = {'display': 'vnc',
+ 'kernel': kernelPath,
+ 'initrd': initramfsPath,
+ 'kernelArgs': 'rd.break=cmdline rd.shell rd.skipfsck',
+ 'kvmEnable': 'true',
+ 'memSize': '256',
+ 'vmId': VMID,
+ 'vmName': 'vdsm_testPinVM',
+ 'vmType': 'kvm',
+ 'cpuPinning': {'emulator': '0', '0': '1'}}
+
+ try:
+ self.assertVdsOK(self.s.create(conf))
+ # wait 65 seconds for VM to come up until timeout
+ self.retryAssert(assertVMAndGuestUp, 65, 1)
+ finally:
+ destroyResult = self.s.destroy(VMID)
+
+ self.assertVdsOK(destroyResult)
diff --git a/tests/vdsClientTests.py b/tests/vdsClientTests.py
index abf3242..57e6e74 100644
--- a/tests/vdsClientTests.py
+++ b/tests/vdsClientTests.py
@@ -118,3 +118,13 @@
allArgs[-1] = 'cpuPinning={0:1,1:0}'
r4 = serv.do_create(['/dev/null'] + allArgs)
self.assertNotEquals(r4, expectResult)
+
+ # test just pin emulator
+ allArgs[-1] = "cpuPinning={emulator:1-3}"
+ r5 = serv.do_create(['/dev/null'] + allArgs)
+ self.assertEquals(r5['cpuPinning'], {'emulator': '1-3'})
+
+ # test pin emulator and vcpu
+ allArgs[-1] = "cpuPinning={emulator:1-3,1:0}"
+ r6 = serv.do_create(['/dev/null'] + allArgs)
+ self.assertEquals(r6['cpuPinning'], {'emulator': '1-3', '1': '0'})
--
To view, visit http://gerrit.ovirt.org/8412
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ia865f0d5eb4c9aabff6cef57b088c55df73a309e
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Royce Lv <lvroyce(a)linux.vnet.ibm.com>
9 years, 2 months
Change in vdsm[master]: [WIP] Towards a more (block) secure HSM.
by ewarszaw@redhat.com
Eduardo has uploaded a new change for review.
Change subject: [WIP] Towards a more (block) secure HSM.
......................................................................
[WIP] Towards a more (block) secure HSM.
Change-Id: I30df4ee5cdb6b44cf14d8cb155436aac7442a07d
---
M vdsm/storage/hsm.py
M vdsm/storage/lvm.py
M vdsm/storage/sp.py
3 files changed, 25 insertions(+), 5 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/18/2218/1
--
To view, visit http://gerrit.ovirt.org/2218
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I30df4ee5cdb6b44cf14d8cb155436aac7442a07d
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Eduardo <ewarszaw(a)redhat.com>
9 years, 4 months
Change in vdsm[master]: [WIP] Implement a process to do dangerous IO in C
by smizrahi@redhat.com
Saggi Mizrahi has uploaded a new change for review.
Change subject: [WIP] Implement a process to do dangerous IO in C
......................................................................
[WIP] Implement a process to do dangerous IO in C
This replaces the process pool with a single process, written in C, that can
serve multiple requests.
This implementation is much more scalable and lightweight. It should solve
bugs related to running out of helpers, logging getting stuck, python
forking deadlocking, running out of memory and other things as well.
The communication between VDSM and the IOProcess is done with json
objects.
The IOProcess starts with 3 thread:
1. requestReader - reads requests from the pipe, builds a DOM
representation of it and queues it up for handling
2. responseWriter - gets response DOMs from the queue converts them to a
JSON string and send it over the pipe
3. requestHandler - pops requests from the queue and provisions threads
for handling them. Currently we I just allocate a new thread per
request. If there is ever a need to have a thread pool this is where
the load balancing is going to sit.
Each request handler gets the arguments as a JsonNode and returns a response
that is a JsonNode as well. Most exported functions are pretty trivial and
are a good example of how to write new ones.
Unlike the ProcessPoolHelper, high level commands sit in the OopWrapper
and are run from the client side instead of being implemented in C on
the IOProcess side.
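The wire format itself is not spelled out in this summary, so purely as a
hypothetical illustration of the JSON exchange described above (the method
name and argument names are invented, not the real IOProcess schema):

import json

request = {'id': 1,
           'methodName': 'access',
           'args': {'path': '/rhev/data-center', 'mode': 4}}
response = {'id': 1, 'errcode': 0, 'result': True}

# requestReader would parse a line like this off the pipe into a DOM...
wireRequest = json.dumps(request)
# ...and responseWriter would serialize the handler's reply back onto it.
wireResponse = json.dumps(response)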
Change-Id: Ie4664d5330debbe38ba33b74ebb586ac42913b4a
Signed-off-by: Saggi Mizrahi <smizrahi(a)redhat.com>
---
M configure.ac
M tests/Makefile.am
A tests/ioprocessTests.py
A tests/outOfProcessTests.py
D tests/processPoolTests.py
M vdsm.spec.in
M vdsm/constants.py.in
M vdsm/storage/Makefile.am
M vdsm/storage/fileSD.py
M vdsm/storage/fileUtils.py
M vdsm/storage/fileVolume.py
A vdsm/storage/ioprocess.py
A vdsm/storage/ioprocess/.gitignore
A vdsm/storage/ioprocess/Makefile.am
A vdsm/storage/ioprocess/exported-functions.c
A vdsm/storage/ioprocess/exported-functions.h
A vdsm/storage/ioprocess/ioprocess.c
A vdsm/storage/ioprocess/json-dom-generator.c
A vdsm/storage/ioprocess/json-dom-generator.h
A vdsm/storage/ioprocess/json-dom-parser.c
A vdsm/storage/ioprocess/json-dom-parser.h
A vdsm/storage/ioprocess/json-dom.c
A vdsm/storage/ioprocess/json-dom.h
M vdsm/storage/misc.py
M vdsm/storage/nfsSD.py
M vdsm/storage/outOfProcess.py
D vdsm/storage/processPool.py
M vdsm/storage/sd.py
M vdsm/storage/sp.py
M vdsm/storage/task.py
30 files changed, 3,018 insertions(+), 666 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/46/3946/1
--
To view, visit http://gerrit.ovirt.org/3946
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ie4664d5330debbe38ba33b74ebb586ac42913b4a
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Saggi Mizrahi <smizrahi(a)redhat.com>
9 years, 5 months
Change in vdsm[master]: Move fencing logic out of API.py
by smizrahi@redhat.com
Saggi Mizrahi has uploaded a new change for review.
Change subject: Move fencing logic out of API.py
......................................................................
Move fencing logic out of API.py
API.py should not contain logic.
In this patch I also:
- Separate the `status` action to a different function because it does
something completely different from the other action and has different
logic.
- Refactor for better code reusability readability.
- Use real exception internally and have the API layer translate them to
actual exceptions.
- Don't support port strings in methods, just in the interface.
- Don't support string booleans in the methods, just in the interface.
- Added shutdownEvent to ClientIF so there is a public standard way of
inspecting vdsm shutdown.
- Make the fencing logic use betterPopen
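As a sketch of the resulting call sites (fenceAgent.py itself is not shown in
this diff, so the signatures below are taken from the API.py hunk and the
example values are invented):

import fenceAgent
from vdsm import utils

# Example inputs, illustrative only.
addr, port, agent = '192.0.2.10', '623', 'ipmilan'
username, password, secure, options = 'root', 'XXXX', ''

# Status query: returns the power state, or raises
# fenceAgent.FenceStatusCheckError, which API.py turns into an error dict.
power = fenceAgent.getFenceStatus(addr, int(port), agent, username,
                                  password, utils.tobool(secure), options)

# Power action: raises fenceAgent.UnsupportedFencingAgentError for an
# unknown agent and watches the new clientIF.shutdownEvent so it can stop
# when vdsm shuts down (cif is the clientIF instance).
# fenceAgent.fenceNode(addr, int(port), agent, username, password,
#                      utils.tobool(secure), options, cif.shutdownEvent)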
Change-Id: I944c6548a42612f705a410fb4290215451bca035
Signed-off-by: Saggi Mizrahi <smizrahi(a)redhat.com>
---
M vdsm.spec.in
M vdsm/API.py
M vdsm/Makefile.am
M vdsm/clientIF.py
4 files changed, 25 insertions(+), 72 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/90/7190/1
diff --git a/vdsm.spec.in b/vdsm.spec.in
index 23afd6b..f122cb5 100644
--- a/vdsm.spec.in
+++ b/vdsm.spec.in
@@ -558,6 +558,7 @@
%{_datadir}/%{vdsm_name}/caps.py*
%{_datadir}/%{vdsm_name}/clientIF.py*
%{_datadir}/%{vdsm_name}/API.py*
+%{_datadir}/%{vdsm_name}/fenceAgent.py*
%{_datadir}/%{vdsm_name}/hooking.py*
%{_datadir}/%{vdsm_name}/hooks.py*
%{_datadir}/%{vdsm_name}/libvirtev.py*
diff --git a/vdsm/API.py b/vdsm/API.py
index ae04e01..5826d81 100644
--- a/vdsm/API.py
+++ b/vdsm/API.py
@@ -19,12 +19,9 @@
#
import os
-import signal
import copy
-import subprocess
import pickle
import time
-import threading
import logging
from vdsm import utils
@@ -40,6 +37,7 @@
from vdsm.define import doneCode, errCode, Kbytes, Mbytes
import caps
from vdsm.config import config
+import fenceAgent
import supervdsm
@@ -981,82 +979,32 @@
agent is one of (rsa, ilo, drac5, ipmilan, etc)
action can be one of (status, on, off, reboot)."""
- def waitForPid(p, inp):
- """ Wait until p.pid exits. Kill it if vdsm exists before. """
- try:
- p.stdin.write(inp)
- p.stdin.close()
- while p.poll() is None:
- if not self._cif._enabled:
- self.log.debug('killing fence script pid %s', p.pid)
- os.kill(p.pid, signal.SIGTERM)
- time.sleep(1)
- try:
- # improbable race: p.pid may now belong to another
- # process
- os.kill(p.pid, signal.SIGKILL)
- except:
- pass
- return
- time.sleep(1)
- self.log.debug('rc %s inp %s out %s err %s', p.returncode,
- hidePasswd(inp),
- p.stdout.read(), p.stderr.read())
- except:
- self.log.error("Error killing fence script", exc_info=True)
-
- def hidePasswd(text):
- cleantext = ''
- for line in text.splitlines(True):
- if line.startswith('passwd='):
- line = 'passwd=XXXX\n'
- cleantext += line
- return cleantext
-
self.log.debug('fenceNode(addr=%s,port=%s,agent=%s,user=%s,' +
'passwd=%s,action=%s,secure=%s,options=%s)', addr, port, agent,
username, 'XXXX', action, secure, options)
- if action not in ('status', 'on', 'off', 'reboot'):
- raise ValueError('illegal action ' + action)
+ secure = utils.tobool(secure)
+ port = int(port)
- script = constants.EXT_FENCE_PREFIX + agent
+ if action == "status":
+ try:
+ power = fenceAgent.getFenceStatus(addr, port, agent, username,
+ password, secure, options)
+
+ return {'status': doneCode,
+ 'power': power}
+
+ except fenceAgent.FenceStatusCheckError as e:
+ return {'status': {'code': 1, 'message': str(e)}}
try:
- p = subprocess.Popen([script], stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE,
- close_fds=True)
- except OSError, e:
- if e.errno == os.errno.ENOENT:
- return errCode['fenceAgent']
- raise
+ fenceAgent.fenceNode(addr, port, agent, username, password, secure,
+ options, self._cif.shutdownEvent)
- inp = ('agent=fence_%s\nipaddr=%s\nlogin=%s\noption=%s\n' +
- 'passwd=%s\n') % (agent, addr, username, action, password)
- if port != '':
- inp += 'port=%s\n' % (port,)
- if utils.tobool(secure):
- inp += 'secure=yes\n'
- inp += options
- if action == 'status':
- out, err = p.communicate(inp)
- self.log.debug('rc %s in %s out %s err %s', p.returncode,
- hidePasswd(inp), out, err)
- if not 0 <= p.returncode <= 2:
- return {'status': {'code': 1,
- 'message': out + err}}
- message = doneCode['message']
- if p.returncode == 0:
- power = 'on'
- elif p.returncode == 2:
- power = 'off'
- else:
- power = 'unknown'
- message = out + err
- return {'status': {'code': 0, 'message': message},
- 'power': power}
- threading.Thread(target=waitForPid, args=(p, inp)).start()
- return {'status': doneCode}
+ return {'status': doneCode}
+
+ except fenceAgent.UnsupportedFencingAgentError:
+ return errCode['fenceAgent']
def ping(self):
"Ping the server. Useful for tests"
diff --git a/vdsm/Makefile.am b/vdsm/Makefile.am
index 62ba982..4a7bc57 100644
--- a/vdsm/Makefile.am
+++ b/vdsm/Makefile.am
@@ -32,6 +32,7 @@
clientIF.py \
configNetwork.py \
debugPluginClient.py \
+ fenceAgent.py \
guestIF.py \
hooking.py \
hooks.py \
@@ -52,7 +53,8 @@
tc.py \
vdsmDebugPlugin.py \
vmChannels.py \
- vm.py
+ vm.py \
+ $(NULL)
dist_vdsmpylib_PYTHON = \
__init__.py \
diff --git a/vdsm/clientIF.py b/vdsm/clientIF.py
index 8ba25a7..8760437 100644
--- a/vdsm/clientIF.py
+++ b/vdsm/clientIF.py
@@ -96,6 +96,7 @@
self.lastRemoteAccess = 0
self._memLock = threading.Lock()
self._enabled = True
+ self.shutdownEvent = threading.Event()
self._netConfigDirty = False
self._prepareMOM()
threading.Thread(target=self._recoverExistingVms,
@@ -219,6 +220,7 @@
for binding in self.bindings.values():
binding.prepareForShutdown()
self._enabled = False
+ self.shutdownEvent.set()
self.channelListener.stop()
self._hostStats.stop()
if self.mom:
--
To view, visit http://gerrit.ovirt.org/7190
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I944c6548a42612f705a410fb4290215451bca035
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Saggi Mizrahi <smizrahi(a)redhat.com>
9 years, 5 months
Change in vdsm[master]: [WIP]add createVm support of cputune
by lvroyce@linux.vnet.ibm.com
Royce Lv has uploaded a new change for review.
Change subject: [WIP]add createVm support of cputune
......................................................................
[WIP]add createVm support of cputune
Allow the engine to pass other cputune params through VM create.
createVm currently uses 'nice' to configure the VM's cpu_shares value;
this patch uses 'shares' from the vm definition directly (1024 by default).
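For illustration, the create() parameters this change is meant to accept
would look roughly like this (a sketch; the key names follow the libvirtvm.py
hunk below, the values are invented):

# Pin vcpu 0 to host cpu 1, keep the emulator threads on cpu 0, and ask for
# 2048 CPU shares instead of the 1024 default used by appendTunable().
conf = {
    'vmId': '77777777-ffff-3333-aaaa-222222222222',
    'vmName': 'vdsm_cputune_demo',
    'cpuPinning': {'emulator': '0', '0': '1'},
    'cputune': {'shares': '2048'},
}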
Change-Id: I76e9b9d291d4801965163774ba45d15b39a77471
Signed-off-by: Royce Lv<lvroyce(a)linux.vnet.ibm.com>
---
M vdsm/libvirtvm.py
1 file changed, 13 insertions(+), 11 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/45/8445/1
diff --git a/vdsm/libvirtvm.py b/vdsm/libvirtvm.py
index fd80c69..9b38a36 100644
--- a/vdsm/libvirtvm.py
+++ b/vdsm/libvirtvm.py
@@ -737,10 +737,13 @@
self.dom.appendChild(cpu)
def appendTunable(self):
- #CPU-pinning support
- # see http://www.ovirt.org/wiki/Features/Design/cpu-pinning
+ cputune = self.doc.createElement('cputune')
+ cputuneParams = {'shares': 1024}
+ if 'cputune' in self.conf:
+ cputuneParams = self.conf['cputune']
if 'cpuPinning' in self.conf:
- cputune = self.doc.createElement('cputune')
+ #CPU-pinning support
+ # see http://www.ovirt.org/wiki/Features/Design/cpu-pinning
cpuPinning = self.conf.get('cpuPinning')
try:
emulatorset = cpuPinning.pop('emulator')
@@ -754,7 +757,13 @@
vcpupin.setAttribute('vcpu', cpuPin)
vcpupin.setAttribute('cpuset', cpuPinning[cpuPin])
cputune.appendChild(vcpupin)
- self.dom.appendChild(cputune)
+
+ for item in cputuneParams.keys():
+ m = self.doc.createElement(item)
+ m.appendChild(self.doc.createTextNode(str(cputuneParams[item])))
+ cputune.appendChild(m)
+
+ self.dom.appendChild(cputune)
def _appendAgentDevice(self, path, name):
"""
@@ -1338,13 +1347,6 @@
if self._initTimePauseCode == 'ENOSPC':
self.cont()
self.conf['pid'] = self._getPid()
-
- nice = int(self.conf.get('nice', '0'))
- nice = max(min(nice, 19), 0)
- try:
- self._dom.setSchedulerParameters({'cpu_shares': (20 - nice) * 51})
- except:
- self.log.warning('failed to set Vm niceness', exc_info=True)
def _run(self):
self.log.info("VM wrapper has started")
--
To view, visit http://gerrit.ovirt.org/8445
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I76e9b9d291d4801965163774ba45d15b39a77471
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Royce Lv <lvroyce(a)linux.vnet.ibm.com>
9 years, 5 months