Change in vdsm[master]: get the status of core dump
by shaohef@linux.vnet.ibm.com
ShaoHe Feng has uploaded a new change for review.
Change subject: get the status of core dump
......................................................................
get the status of core dump
Change-Id: I5d552db4dbd88762950ec5a113a25c13b73319c8
Signed-off-by: ShaoHe Feng <shaohef(a)linux.vnet.ibm.com>
---
M vdsm/API.py
M vdsm/BindingXMLRPC.py
M vdsm/vm.py
M vdsm_api/vdsmapi-schema.json
M vdsm_cli/vdsClient.py
5 files changed, 36 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/31/11131/1
diff --git a/vdsm/API.py b/vdsm/API.py
index c5f7d40..6b4663a 100644
--- a/vdsm/API.py
+++ b/vdsm/API.py
@@ -302,6 +302,15 @@
return errCode['noVM']
return v.dumpCancel()
+ def dumpStatus(self):
+ """
+ Report status of a currently ongoing core dump process.
+ """
+ v = self._cif.vmContainer.get(self._UUID)
+ if not v:
+ return errCode['noVM']
+ return v.dumpStatus()
+
def desktopLock(self):
"""
Lock user session in guest operating system using guest agent.
diff --git a/vdsm/BindingXMLRPC.py b/vdsm/BindingXMLRPC.py
index 17d97b1..b1f22fd 100644
--- a/vdsm/BindingXMLRPC.py
+++ b/vdsm/BindingXMLRPC.py
@@ -219,6 +219,10 @@
vm = API.VM(vmId)
return vm.dumpCancel()
+ def vmCoreDumpStatus(self, vmId):
+ vm = API.VM(vmId)
+ return vm.dumpStatus()
+
def vmReset(self, vmId):
vm = API.VM(vmId)
return vm.reset()
@@ -769,6 +773,7 @@
(self.vmCont, 'cont'),
(self.vmCoreDump, 'coreDump'),
(self.vmCoreDumpCancel, 'dumpCancel'),
+ (self.vmCoreDumpStatus, 'dumpStatus'),
(self.vmSnapshot, 'snapshot'),
(self.vmMerge, 'merge'),
(self.vmMergeStatus, 'mergeStatus'),
diff --git a/vdsm/vm.py b/vdsm/vm.py
index 0a40e97..5d9c0d9 100644
--- a/vdsm/vm.py
+++ b/vdsm/vm.py
@@ -1371,3 +1371,6 @@
return reportError(msg=e.message)
finally:
self._guestCpuLock.release()
+
+ def dumpStatus(self):
+ return self._doCoredumpThread.getStat()
diff --git a/vdsm_api/vdsmapi-schema.json b/vdsm_api/vdsmapi-schema.json
index 39d1cba..e96f01f 100644
--- a/vdsm_api/vdsmapi-schema.json
+++ b/vdsm_api/vdsmapi-schema.json
@@ -5484,6 +5484,16 @@
{'command': {'class': 'VM', 'name': 'dumpCancel'}}
##
+# @VM.dumpStatus:
+#
+# Reports the state of the currently running core dump process
+#
+# Since: 4.10.4
+#
+##
+{'command': {'class': 'VM', 'name': 'dumpStatus'}}
+
+##
# @VM.monitorCommand:
#
# Send a command to the qemu monitor.
diff --git a/vdsm_cli/vdsClient.py b/vdsm_cli/vdsClient.py
index 32ad348..7edc674 100644
--- a/vdsm_cli/vdsClient.py
+++ b/vdsm_cli/vdsClient.py
@@ -1674,6 +1674,11 @@
response = self.s.dumpCancel(vmId)
return response['status']['code'], response['status']['message']
+ def do_dumpStat(self, args):
+ vmId = args[0]
+ response = self.s.dumpStatus(vmId)
+ return response['status']['code'], response['status']['message']
+
def coreDump(self, args):
dumpParams = {'crash': False,
'live': False,
@@ -2422,6 +2427,10 @@
('<vmId>',
'cancel machine core dump'
)),
+ 'coreDumpStatus': (serv.do_dumpStat,
+ ('<vmId>',
+ 'Check the progress of current core dump'
+ )),
'coreDump': (serv.coreDump,
('<vmId> <file> [live=<True|False>] '
'[crash=<True|False>] [bypass-cache=<True|False>] '
--
To view, visit http://gerrit.ovirt.org/11131
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I5d552db4dbd88762950ec5a113a25c13b73319c8
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: ShaoHe Feng <shaohef(a)linux.vnet.ibm.com>
8 years, 6 months
Change in vdsm[master]: dump the core of a domain
by shaohef@linux.vnet.ibm.com
ShaoHe Feng has uploaded a new change for review.
Change subject: dump the core of a domain
......................................................................
dump the core of a domain
libvirt support an API to dump the core of a domain on a given file for
analysis when guest OS crash.
There are two kinds of dump files: one is a QEMU suspend-to-disk image;
the other is a core file, which is like a kdump file but contains
registers' values.
It is helpful for VDSM to support this, to find the root cause when a
guest hangs and kdump isn't set up in it. This would be a good RAS feature.
Here's the definition of the new API:
coreDump:
This method will dump the core of a domain on a given file for
analysis.
Input parameter:
vmId - VM UUID
to - the core file named by the user
flags - defined in libvirt.py
VIR_DUMP_CRASH
VIR_DUMP_LIVE
VIR_DUMP_BYPASS_CACHE
VIR_DUMP_RESET
VIR_DUMP_MEMORY_ONLY
Return value:
success: return doneCode
failure: return errCode including underlying libvirt error message.
Change-Id: If4aac9e747dc7aa64a6ff5ef256a7a4375aa2bb5
Signed-off-by: ShaoHe Feng <shaohef(a)linux.vnet.ibm.com>
---
M vdsm/API.py
M vdsm/BindingXMLRPC.py
M vdsm/define.py
M vdsm/libvirtvm.py
M vdsm_cli/vdsClient.py
5 files changed, 80 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/29/7329/1
diff --git a/vdsm/API.py b/vdsm/API.py
index 19cbb42..e2b24cb 100644
--- a/vdsm/API.py
+++ b/vdsm/API.py
@@ -244,6 +244,12 @@
self.log.debug("Error creating VM", exc_info=True)
return errCode['unexpected']
+ def coreDump(self, to, flags):
+ v = self._cif.vmContainer.get(self._UUID)
+ if not v:
+ return errCode['noVM']
+ return v.coreDump(to, flags)
+
def desktopLock(self):
"""
Lock user session in guest operating system using guest agent.
diff --git a/vdsm/BindingXMLRPC.py b/vdsm/BindingXMLRPC.py
index cc5300f..be71e6a 100644
--- a/vdsm/BindingXMLRPC.py
+++ b/vdsm/BindingXMLRPC.py
@@ -208,6 +208,10 @@
vm = API.VM(vmId)
return vm.cont()
+ def vmCoreDump(self, vmId, to, flags):
+ vm = API.VM(vmId)
+ return vm.coreDump(to, flags)
+
def vmReset(self, vmId):
vm = API.VM(vmId)
return vm.reset()
@@ -725,6 +729,7 @@
(self.getVMList, 'list'),
(self.vmPause, 'pause'),
(self.vmCont, 'cont'),
+ (self.vmCoreDump, 'coreDump'),
(self.vmSnapshot, 'snapshot'),
(self.vmMerge, 'merge'),
(self.vmMergeStatus, 'mergeStatus'),
diff --git a/vdsm/define.py b/vdsm/define.py
index 31deb4f..1fedac5 100644
--- a/vdsm/define.py
+++ b/vdsm/define.py
@@ -114,6 +114,10 @@
'mergeErr': {'status':
{'code': 52,
'message': 'Merge failed'}},
+ 'coreDumpErr': {'status':
+ {'code': 54,
+ 'message':
+ 'Failed to get coreDump file'}},
'recovery': {'status':
{'code': 99,
'message':
diff --git a/vdsm/libvirtvm.py b/vdsm/libvirtvm.py
index 4554fee..cbd9f96 100644
--- a/vdsm/libvirtvm.py
+++ b/vdsm/libvirtvm.py
@@ -1904,6 +1904,27 @@
self.saveState()
+ def coreDump(self, to, flags):
+
+ def reportError(key='coreDumpErr', msg=None):
+ self.log.error("get coreDump failed", exc_info=True)
+ if msg == None:
+ error = errCode[key]
+ else:
+ error = {'status' : {'code': errCode[key] \
+ ['status']['code'], 'message': msg}}
+ return error
+
+ if self._dom == None:
+ return reportError()
+ try:
+ self._dom.coreDump(to, flags)
+ except libvirt.libvirtError, e:
+ if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
+ return reportError(key='noVM')
+ return reportError(msg=e.message)
+ return {'status': doneCode}
+
def changeCD(self, drivespec):
return self._changeBlockDev('cdrom', 'hdc', drivespec)
diff --git a/vdsm_cli/vdsClient.py b/vdsm_cli/vdsClient.py
index eeb7c95..cdcd3a8 100644
--- a/vdsm_cli/vdsClient.py
+++ b/vdsm_cli/vdsClient.py
@@ -1589,6 +1589,33 @@
return status['status']['code'], status['status']['message']
+ def coreDump(self, args):
+ DUMPFLAGS = {'crash': 1 << 0,
+ 'live': 1 << 1,
+ 'bypass-cache': 1 << 2,
+ 'reset': 1 << 3,
+ 'memory-only': 1 << 4}
+ flags = 0
+ vmId = args[0]
+ coreFile = args[1]
+ params = {}
+ if len(args) > 2:
+ for arg in args[2:]:
+ kv = arg.split('=', 1)
+ if len(kv) < 2:
+ params[kv[0]] = "True"
+ else:
+ params[kv[0]] = kv[1]
+ for k, v in params.items():
+ if v.lower() == "true" or not v:
+ try:
+ flags = flags + DUMPFLAGS[k]
+ except KeyError:
+ print "unrecognized option %s for coreDump command" % k
+ response = self.s.coreDump(vmId, coreFile, flags)
+ return response['status']['code'], response['status']['message']
+
+
if __name__ == '__main__':
if _glusterEnabled:
serv = ge.GlusterService()
@@ -2239,6 +2266,23 @@
('<vmId> <sdUUID> <imgUUID> <baseVolUUID> <volUUID>',
"Take a live snapshot"
)),
+ 'coreDump': (serv.coreDump,
+ ('<vmId> <file> [live=<True>] '
+ '[crash=<True>] [bypass-cache=<True>] '
+ '[reset=<True>] [memory-only=<True>]',
+ "get memory dump or migration file"
+ 'optional params:',
+ 'crash: crash the domain after core dump'
+ 'default False',
+ 'live: perform a live core dump if supported, '
+ 'default False',
+ 'bypass-cache: avoid file system cache when saving'
+ 'default False',
+ 'reset: reset the domain after core dump'
+ 'default False',
+ "memory-only: dump domain's memory only"
+ 'default False'
+ )),
}
if _glusterEnabled:
commands.update(ge.getGlusterCmdDict(serv))
--
To view, visit http://gerrit.ovirt.org/7329
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: If4aac9e747dc7aa64a6ff5ef256a7a4375aa2bb5
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: ShaoHe Feng <shaohef(a)linux.vnet.ibm.com>
8 years, 6 months
Change in vdsm[master]: cancel the core dump of a VM
by shaohef@linux.vnet.ibm.com
ShaoHe Feng has uploaded a new change for review.
Change subject: cancel the core dump of a VM
......................................................................
cancel the core dump of a VM
Change-Id: I2fa9e82cfbd43c9edb98fac9af41eb0deb0c67ad
Signed-off-by: ShaoHe Feng <shaohef(a)linux.vnet.ibm.com>
---
M vdsm/API.py
M vdsm/BindingXMLRPC.py
M vdsm/define.py
M vdsm/vm.py
M vdsm_api/vdsmapi-schema.json
M vdsm_cli/vdsClient.py
6 files changed, 62 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/30/11130/1
diff --git a/vdsm/API.py b/vdsm/API.py
index 4f5eed8..c5f7d40 100644
--- a/vdsm/API.py
+++ b/vdsm/API.py
@@ -293,6 +293,15 @@
return errCode['noVM']
return v.coreDump(to, dumpParams)
+ def dumpCancel(self):
+ """
+ Cancel a currently ongoing core dump process.
+ """
+ v = self._cif.vmContainer.get(self._UUID)
+ if not v:
+ return errCode['noVM']
+ return v.dumpCancel()
+
def desktopLock(self):
"""
Lock user session in guest operating system using guest agent.
diff --git a/vdsm/BindingXMLRPC.py b/vdsm/BindingXMLRPC.py
index 9fcbefd..17d97b1 100644
--- a/vdsm/BindingXMLRPC.py
+++ b/vdsm/BindingXMLRPC.py
@@ -215,6 +215,10 @@
vm = API.VM(vmId)
return vm.coreDump(to, params)
+ def vmCoreDumpCancel(self, vmId):
+ vm = API.VM(vmId)
+ return vm.dumpCancel()
+
def vmReset(self, vmId):
vm = API.VM(vmId)
return vm.reset()
@@ -764,6 +768,7 @@
(self.vmPause, 'pause'),
(self.vmCont, 'cont'),
(self.vmCoreDump, 'coreDump'),
+ (self.vmCoreDumpCancel, 'dumpCancel'),
(self.vmSnapshot, 'snapshot'),
(self.vmMerge, 'merge'),
(self.vmMergeStatus, 'mergeStatus'),
diff --git a/vdsm/define.py b/vdsm/define.py
index 84aacad..e1d428c 100644
--- a/vdsm/define.py
+++ b/vdsm/define.py
@@ -134,6 +134,9 @@
{'code': 58,
'message':
'Failed to generate coreDump file'}},
+ 'dumpCancelErr': {'status':
+ {'code': 59,
+ 'message': 'Failed to cancel dump'}},
'recovery': {'status':
{'code': 99,
'message':
diff --git a/vdsm/vm.py b/vdsm/vm.py
index be947c6..0a40e97 100644
--- a/vdsm/vm.py
+++ b/vdsm/vm.py
@@ -1345,3 +1345,29 @@
return check
finally:
self._guestCpuLock.release()
+
+ def dumpCancel(self):
+ def reportError(self, key='dumpCancelErr', msg=None):
+ if msg is None:
+ error = errCode[key]
+ else:
+ error = {'status':
+ {'code': errCode[key]['status']['code'],
+ 'message': msg}}
+ self.log.error("Failed to cancel core dump. " + msg,
+ exc_info=True)
+ return error
+
+ self._acquireCpuLockWithTimeout()
+ try:
+ if not self.isDoingDump():
+ return reportError(msg='no core dump in process')
+ if self.dumpMode() == "memory":
+ return reportError(msg='invalid to cancel memory dump')
+ self._doCoredumpThread.stop()
+ return {'status': {'code': 0,
+ 'message': 'core dump process stopped'}}
+ except Exception, e:
+ return reportError(msg=e.message)
+ finally:
+ self._guestCpuLock.release()
diff --git a/vdsm_api/vdsmapi-schema.json b/vdsm_api/vdsmapi-schema.json
index 63b0fb1..39d1cba 100644
--- a/vdsm_api/vdsmapi-schema.json
+++ b/vdsm_api/vdsmapi-schema.json
@@ -5474,6 +5474,16 @@
'data': {'to': 'str', 'params': 'DumpParams'}}
##
+# @VM.dumpCancel:
+#
+# Cancel the currently ongoing core dump process.
+#
+# Since: 4.10.4
+#
+##
+{'command': {'class': 'VM', 'name': 'dumpCancel'}}
+
+##
# @VM.monitorCommand:
#
# Send a command to the qemu monitor.
diff --git a/vdsm_cli/vdsClient.py b/vdsm_cli/vdsClient.py
index c4171d9..32ad348 100644
--- a/vdsm_cli/vdsClient.py
+++ b/vdsm_cli/vdsClient.py
@@ -1669,6 +1669,11 @@
return status['status']['code'], status['status']['message']
+ def do_dumpCancel(self, args):
+ vmId = args[0]
+ response = self.s.dumpCancel(vmId)
+ return response['status']['code'], response['status']['message']
+
def coreDump(self, args):
dumpParams = {'crash': False,
'live': False,
@@ -2413,6 +2418,10 @@
'Start live replication to the destination '
'domain'
)),
+ 'coreDumpCancel': (serv.do_dumpCancel,
+ ('<vmId>',
+ 'cancel machine core dump'
+ )),
'coreDump': (serv.coreDump,
('<vmId> <file> [live=<True|False>] '
'[crash=<True|False>] [bypass-cache=<True|False>] '
--
To view, visit http://gerrit.ovirt.org/11130
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I2fa9e82cfbd43c9edb98fac9af41eb0deb0c67ad
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: ShaoHe Feng <shaohef(a)linux.vnet.ibm.com>
8 years, 6 months
Change in vdsm[master]: [WIP] Added glusterVolumeTop verb
by tjeyasin@redhat.com
Hello Ayal Baron, Bala.FA, Saggi Mizrahi, Federico Simoncelli, Dan Kenigsberg,
I'd like you to do a code review. Please visit
http://gerrit.ovirt.org/7844
to review the following change.
Change subject: [WIP] Added glusterVolumeTop verb
......................................................................
[WIP] Added glusterVolumeTop verb
Added glusterVolumeTopOpen verb
Added glusterVolumeTopRead verb
Added glusterVolumeTopWrite verb
Added glusterVolumeTopOpenDir verb
Added glusterVolumeTopReadDir verb
Added glusterVolumeTopReadPerf
verb Added glusterVolumeTopWritePerf verb
Following is the output structure of glusterVolumeTopOpen
{'statusCode' : CODE,
'brickCount': BRICK-COUNT,
'bricks': {BRICK-NAME: {'count':FILE-COUNT,
'currentOpenFds': CURRENT-OPEN-FDS-COUNT,
'maxOpen': MAX-OPEN,
'maxOpenTime': MAX-OPEN-TIME,
'files': [{FILE-NAME: FILE-OPEN-COUNT}, ...]
}, ...} }
Following is the output structure of glusterVolumeTopRead
{'statusCode': CODE,
'brickCount': BRICK-COUNT,
'topOp': TOP-OP,
'bricks': {BRICK-NAME: {
'count': FILE-COUNT,
'files': [{FILE-NAME: FILE-READ-COUNT}, ...]}
,...}}
Following is the output structure glusterVolumeTopWrite
{'statusCode' : CODE,
'brickCount': BRICK-COUNT,
'topOp': TOP-OP,
'bricks': {BRICK-NAME: {'count': FILE-COUNT,
'files': [{FILE-NAME: FILE-WRITE-COUNT}...]}
,...}}
Following is the output structure glusterVolumeTopOpenDir
{'statusCode': CODE,
'brickCount': BRICK-COUNT,
'topOp': TOP-OP,
'bricks': {BRICK-NAME: {'count':OPEN-DIR-COUNT,
'files': [{DIR-NAME: DIR-OPEN-COUNT}, ...]}
,...}
Following is the output structure glusterVolumeTopReadDir
{'statusCode': CODE,
'brickCount': BRICK-COUNT,
'topOp': TOP-OP,
'bricks': {BRICK-NAME: {'count':READ-DIR-COUNT,
'files': [{DIR-NAME: DIR-READ-COUNT}, ...]}
,...}
Following is the output structure glusterVolumeTopReadPerf
{'statusCode': CODE,
'brickCount': BRICK-COUNT,
'topOp': TOP-OP,
'bricks': {BRICK-NAME: {'fileCount':READ-COUNT,
'throughput': BRICK-WISE-READ-THROUGHPUT,
' timeTaken': TIME-TAKEN,
'files': [{FILE-NAME:
{'throughput':FILE-READ-THROUGHPUT,
'time': TIME}}, ...]}
,...}}
Following is the output structure glusterVolumeTopWritePerf
{'statusCode': CODE,
'brickCount': BRICK-COUNT,
'topOp': TOP-OP,
'bricks': {BRICK-NAME: {'fileCount':WRITE-COUNT,
'throughput': BRICK-WISE-WRITE-THROUGHPUT,
' timeTaken': TIME-TAKEN,
'files': [{FILE-NAME:
{'throughput':FILE-WRITE-THROUGHPUT,
'time': TIME}}, ...]}
,...}}
Change-Id: I96486363a9acb7472014a67fcd2d5185d4f3c428
Signed-off-by: Timothy Asir <tjeyasin(a)redhat.com>
---
M vdsm/gluster/api.py
M vdsm/gluster/cli.py
M vdsm/gluster/exception.py
M vdsm_cli/vdsClientGluster.py
4 files changed, 372 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/44/7844/1
diff --git a/vdsm/gluster/api.py b/vdsm/gluster/api.py
index e52430b..3f493e0 100644
--- a/vdsm/gluster/api.py
+++ b/vdsm/gluster/api.py
@@ -241,6 +241,61 @@
status = self.svdsmProxy.glusterVolumeProfileInfo(volumeName)
return {'profileInfo': status}
+ @exportAsVerb
+ def volumeTopOpen(self, volumeName, brickName=None, count=None,
+ options=None):
+ status = self.svdsmProxy.glusterVolumeTopOpen(volumeName,
+ brickName, count)
+ return {'topOpen': status}
+
+ @exportAsVerb
+ def volumeTopRead(self, volumeName, brickName=None, count=None,
+ options=None):
+ status = self.svdsmProxy.glusterVolumeTopRead(volumeName,
+ brickName, count)
+ return {'topRead': status}
+
+ @exportAsVerb
+ def volumeTopWrite(self, volumeName, brickName=None, count=None,
+ options=None):
+ status = self.svdsmProxy.glusterVolumeTopWrite(volumeName,
+ brickName, count)
+ return {'topWrite': status}
+
+ @exportAsVerb
+ def volumeTopOpenDir(self, volumeName, brickName=None, count=None,
+ options=None):
+ status = self.svdsmProxy.glusterVolumeTopOpenDir(volumeName,
+ brickName, count)
+ return {'topOpenDir': status}
+
+ @exportAsVerb
+ def volumeTopWriteDir(self, volumeName, brickName=None, count=None,
+ options=None):
+ status = self.svdsmProxy.glusterVolumeTopWriteDir(volumeName,
+ brickName, count)
+ return {'topWriteDir': status}
+
+ @exportAsVerb
+ def volumeTopReadPerf(self, volumeName, blockSize=None, count=None,
+ brickName=None, listCount=None, options=None):
+ status = self.svdsmProxy.glusterVolumeTopReadPerf(volumeName,
+ blockSize,
+ count,
+ brickName,
+ listCount)
+ return {'topReadPerf': status}
+
+ @exportAsVerb
+ def volumeTopWritePerf(self, volumeName, blockSize=None, count=None,
+ brickName=None, listCount=None, options=None):
+ status = self.svdsmProxy.glusterVolumeTopWritePerf(volumeName,
+ blockSize,
+ count,
+ brickName,
+ listCount)
+ return {'topWritePerf': status}
+
def getGlusterMethods(gluster):
l = []
diff --git a/vdsm/gluster/cli.py b/vdsm/gluster/cli.py
index b91a04f..ba4768c 100644
--- a/vdsm/gluster/cli.py
+++ b/vdsm/gluster/cli.py
@@ -334,6 +334,66 @@
return volumeInfoDict
+def _parseGlusterVolumeTopOpen(tree):
+ bricks = {}
+ for brick in tree.findall('volTop/brick'):
+ fileList = []
+ for file in brick.findall('file'):
+ fileList.append({file.find('filename').text:
+ file.find('count').text})
+ bricks[brick.find('name').text] = {
+ 'count': brick.find('members').text,
+ 'currentOpen': brick.find('currentOpen').text,
+ 'maxOpen': brick.find('maxOpen').text,
+ 'maxOpenTime': brick.find('maxOpenTime').text,
+ 'files': fileList}
+ status = {
+ 'topOp': tree.find('volTop/topOp').text,
+ 'brickCount': tree.find('volTop/brickCount').text,
+ 'statusCode': tree.find('opRet').text,
+ 'bricks': bricks}
+ return status
+
+
+def _parseGlusterVolumeTop(tree):
+ bricks = {}
+ for brick in tree.findall('volTop/brick'):
+ fileList = []
+ for fileTag in brick.findall('file'):
+ fileList.append({fileTag.find('filename').text:
+ fileTag.find('count').text})
+ bricks[brick.find('name').text] = {
+ 'count': brick.find('members').text,
+ 'files': fileList}
+ status = {
+ 'topOp': tree.find('volTop/topOp').text,
+ 'brickCount': tree.find('volTop/brickCount').text,
+ 'statusCode': tree.find('opRet').text,
+ 'bricks': bricks}
+ return status
+
+
+def _parseGlusterVolumeTopPerf(tree):
+ bricks = {}
+ for brick in tree.findall('volTop/brick'):
+ fileList = []
+ for fileTag in brick.findall('file'):
+ fileList.append({fileTag.find('filename').text:
+ {'count': fileTag.find('count').text,
+ 'time': fileTag.find('time').text}})
+ bricks[brick.find('name').text] = {
+ 'count': brick.find('members').text,
+ 'throughput': brick.find('throughput').text,
+ 'timeTaken': brick.find('timeTaken').text,
+ 'files': fileList}
+ status = {
+ 'topOp': tree.find('volTop/topOp').text,
+ 'brickCount': tree.find('volTop/brickCount').text,
+ 'statusCode': tree.find("opRet").text,
+ 'bricks': bricks}
+ return status
+
+
def _parseGlusterVolumeProfileInfo(tree):
bricks = {}
for brick in tree.findall('volProfile/brick'):
@@ -819,3 +879,132 @@
return _parseGlusterVolumeProfileInfo(xmltree)
except:
raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopOpen(volumeName, brickName=None, count=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "open"]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if count:
+ command += ["list-cnt", "%s" % count]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopOpenFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTopOpen(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopRead(volumeName, brickName=None, count=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "read"]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if count:
+ command += ["list-cnt", "%s" % count]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopReadFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTop(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopWrite(volumeName, brickName=None, count=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "write"]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if count:
+ command += ["list-cnt", "%s" % count]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopWriteFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTop(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopOpenDir(volumeName, brickName=None, count=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "opendir"]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if count:
+ command += ["list-cnt", "%s" % count]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopOpenDirFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTop(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopReadDir(volumeName, bricName=None, count=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "write"]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if count:
+ command += ["list-cnt", "%s" % count]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopReadDirFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTop(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopReadPerf(volumeName, blockSize=None, count=None,
+ brickName=None, listCount=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "read-perf"]
+ if blockSize:
+ command += ["bs", "%s" % blockSize]
+ if count:
+ command += ["count", "%s" % count]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if listCount:
+ command += ["list-cnt", "%s" % listCount]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopReadPerfFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTopPerf(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopWritePerf(volumeName, blockSize=None, count=None,
+ brickName=None, listCount=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "write-perf"]
+ if blockSize:
+ command += ["bs", "%s" % blockSize]
+ if count:
+ command += ["count", "%s" % count]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if listCount:
+ command += ["list-cnt", "%s" % listCount]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopWritePerfFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTopPerf(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
diff --git a/vdsm/gluster/exception.py b/vdsm/gluster/exception.py
index bc20dd0..b392ec8 100644
--- a/vdsm/gluster/exception.py
+++ b/vdsm/gluster/exception.py
@@ -343,6 +343,41 @@
message = "Volume profile info failed"
+class GlusterVolumeTopOpenFailedException(GlusterVolumeException):
+ code = 4161
+ message = "Volume top open failed"
+
+
+class GlusterVolumeTopReadFailedException(GlusterVolumeException):
+ code = 4162
+ message = "Volume top read failed"
+
+
+class GlusterVolumeTopWriteFailedException(GlusterVolumeException):
+ code = 4163
+ message = "Volume top write failed"
+
+
+class GlusterVolumeTopOpenDirFailedException(GlusterVolumeException):
+ code = 4164
+ message = "Volume top open dir failed"
+
+
+class GlusterVolumeTopReadDirFailedException(GlusterVolumeException):
+ code = 4165
+ message = "Volume top read dir failed"
+
+
+class GlusterVolumeTopReadPerfFailedException(GlusterVolumeException):
+ code = 4166
+ message = "Volume top read perf failed"
+
+
+class GlusterVolumeTopWritePerfFailedException(GlusterVolumeException):
+ code = 4167
+ message = "Volume top write perf failed"
+
+
# Host
class GlusterHostException(GlusterException):
code = 4400
diff --git a/vdsm_cli/vdsClientGluster.py b/vdsm_cli/vdsClientGluster.py
index 8422695..3663c63 100644
--- a/vdsm_cli/vdsClientGluster.py
+++ b/vdsm_cli/vdsClientGluster.py
@@ -221,6 +221,41 @@
pp.pprint(status)
return status['status']['code'], status['status']['message']
+ def do_glusterVolumeTopOpen(self, args):
+ status = self.s.glusterVolumeTopOpen(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeTopRead(self, args):
+ status = self.s.glusterVolumeTopRead(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeTopWrite(self, args):
+ status = self.s.glusterVolumeTopWrite(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeTopOpenDir(self, args):
+ status = self.s.glusterVolumeTopOpenDir(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeTopReadDir(self, args):
+ status = self.s.glusterVolumeTopReadDir(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeTopReadPerf(self, args):
+ status = self.s.glusterVolumeTop(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeTopWritePerf(self, args):
+ status = self.s.glusterVolumeTop(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
def getGlusterCmdDict(serv):
return {
@@ -403,4 +438,62 @@
('<volume_name>\n\t<volume_name> is existing volume name',
'get gluster volume profile info'
)),
+ 'glusterVolumeTopOpen':
+ (serv.do_glusterVolumeTopOpen,
+ ('<volume_name> [brick=<existing_brick>] '
+ '[count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get volume top open fd count and maximum fd count of '
+ 'a given volume with its all brick or specified brick'
+ )),
+ 'glusterVolumeTopRead':
+ (serv.do_glusterVolumeTopRead,
+ ('<volume_name> [brick=<existing_brick>] '
+ '[count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get list of highest read calls on each brick or '
+ 'a specified brick of a volume'
+ )),
+ 'glusterVolumeTopWrite':
+ (serv.do_glusterVolumeTopWrite,
+ ('<volume_name> [brick=<existing_brick>] '
+ '[count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get list of highest write calls on each brick or '
+ 'a specified brick of a volume'
+ )),
+ 'glusterVolumeTopOpenDir':
+ (serv.do_glusterVolumeTopOpenDir,
+ ('<volume_name> [brick=<existing_brick>] '
+ '[count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get list of highest open calls on directories of each brick '
+ 'or a specified brick of a volume'
+ )),
+ 'glusterVolumeTopReadDir':
+ (serv.do_glusterVolumeTopReadDir,
+ ('<volume_name> [brick=<existing_brick>] '
+ '[count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get list of highest read calls on directories of each brick '
+ 'or a specified brick of a volume'
+ )),
+ 'glusterVolumeTopReadPerf':
+ (serv.do_glusterVolumeTopReadPerf,
+ ('<volume_name> [block_size=<block_size>] '
+ '[count=<count>] [brick=<existing_brick>] '
+ '[list_count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get list of read throughput of files on bricks. '
+ 'if the block size and the count is not specified, '
+ 'it will give the output based on historical data'
+ )),
+ 'glusterVolumeTopWritePerf':
+ (serv.do_glusterVolumeTopWritePerf,
+ ('<volume_name> [block_size=<block_size>] '
+ '[count=<count>] [brick=<existing_brick>] '
+ '[list_count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get list of write throughput of files on bricks'
+ )),
}
--
To view, visit http://gerrit.ovirt.org/7844
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I96486363a9acb7472014a67fcd2d5185d4f3c428
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Timothy Asir <tjeyasin(a)redhat.com>
Gerrit-Reviewer: Ayal Baron <abaron(a)redhat.com>
Gerrit-Reviewer: Bala.FA <barumuga(a)redhat.com>
Gerrit-Reviewer: Dan Kenigsberg <danken(a)redhat.com>
Gerrit-Reviewer: Federico Simoncelli <fsimonce(a)redhat.com>
Gerrit-Reviewer: Saggi Mizrahi <smizrahi(a)redhat.com>
8 years, 6 months
Change in vdsm[master]: gluster: Setup and verify ssl connection between nodes.
by tjeyasin@redhat.com
Hello Ayal Baron, Bala.FA, Saggi Mizrahi, Dan Kenigsberg,
I'd like you to do a code review. Please visit
http://gerrit.ovirt.org/18355
to review the following change.
Change subject: gluster: Setup and verify ssl connection between nodes.
......................................................................
gluster: Setup and verify ssl connection between nodes.
This will be used in geo-replication session creation.
Because there should be password-less ssh access between at least one
node of the master volume and one node of the slave volume before
creating a geo-replication session.
Below new verbs are added
*glusterValidateSshConnection
*glusterSetupSshConnection
Change-Id: Ia6f040b1343998de4f8e28419c63e380240368db
Signed-off-by: Bala.FA <barumuga(a)redhat.com>
Signed-off-by: Timothy Asir <tjeyasin(a)redhat.com>
---
M client/vdsClientGluster.py
M vdsm.spec.in
M vdsm/gluster/api.py
M vdsm/gluster/exception.py
4 files changed, 191 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/55/18355/1
diff --git a/client/vdsClientGluster.py b/client/vdsClientGluster.py
index 90af83e..0ae7ec7 100644
--- a/client/vdsClientGluster.py
+++ b/client/vdsClientGluster.py
@@ -424,6 +424,30 @@
pp.pprint(status)
return status['status']['code'], status['status']['message']
+ def do_glusterValidateSshConnection(self, args):
+ params = self._eqSplit(args)
+ host = params.get('host', '')
+ fingerprint = params.get('fingerprint', '')
+ username = params.get('username', '')
+
+ status = self.s.glusterValidateSshConnection(host,
+ fingerprint,
+ username)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterSetupSshConnection(self, args):
+ params = self._eqSplit(args)
+ host = params.get('host', '')
+ fingerprint = params.get('fingerprint', '')
+ username = params.get('username', '')
+ password = params.get('password', '')
+
+ status = self.s.glusterSetupSshConnection(host,
+ fingerprint,
+ username,
+ password)
+ return status['status']['code'], status['status']['message']
+
def getGlusterCmdDict(serv):
return \
@@ -705,4 +729,15 @@
'not set'
'(swift, glusterd, smb, memcached)'
)),
+ 'glusterValidateSshConnection': (
+ serv.do_glusterValidateSshConnection,
+ ('host=<host> fingerprint=<fingerprint> username=<username>',
+ 'validate passwordless ssh connection'
+ )),
+ 'glusterSetupSshConnection': (
+ serv.do_glusterSetupSshConnection,
+ ('host=<host> fingerprint=<fingerprint> username=<username> '
+ 'password=<password>',
+ 'setup passwordless ssh connection'
+ )),
}
diff --git a/vdsm.spec.in b/vdsm.spec.in
index e2307e0..81d8f9f 100644
--- a/vdsm.spec.in
+++ b/vdsm.spec.in
@@ -524,6 +524,7 @@
Requires: glusterfs-fuse
Requires: glusterfs-rdma
Requires: python-magic
+Requires: python-paramiko
%description gluster
Gluster plugin enables VDSM to serve Gluster functionalities.
diff --git a/vdsm/gluster/api.py b/vdsm/gluster/api.py
index 4bd8308..1d93150 100644
--- a/vdsm/gluster/api.py
+++ b/vdsm/gluster/api.py
@@ -19,11 +19,28 @@
#
from functools import wraps
+import socket
+import paramiko
+import logging
+import os
+import re
from vdsm.define import doneCode
import supervdsm as svdsm
+from vdsm.config import config
+from vdsm import utils
+import exception as ge
_SUCCESS = {'status': doneCode}
+_KEYFILE = config.get('vars', 'trust_store_path') + '/keys/vdsmkey.pem'
+_sshKeyGenCommandPath = utils.CommandPath("ssh-keygen",
+ "/usr/bin/ssh-keygen",
+ )
+_SSH_COPY_ID_CMD = 'umask 077 && mkdir -p ~/.ssh && ' \
+ 'cat >> ~/.ssh/authorized_keys && if test -x /sbin/restorecon; ' \
+ 'then /sbin/restorecon ~/.ssh ~/.ssh/authorized_keys >/dev/null 2>&1; ' \
+ 'else true; fi'
+paramiko.util.get_logger('paramiko').setLevel(logging.ERROR)
GLUSTER_RPM_PACKAGES = (
('glusterfs', 'glusterfs'),
@@ -59,6 +76,57 @@
wrapper.exportAsVerb = True
return wrapper
+
+
+class VolumeStatus():
+ ONLINE = 'ONLINE'
+ OFFLINE = 'OFFLINE'
+
+
+class HostKeyMatchException(paramiko.SSHException):
+ def __init__(self, hostname, fingerprint, expected_fingerprint):
+ self.err = 'Fingerprint %s of host %s does not match with %s' % \
+ (fingerprint, hostname, expected_fingerprint)
+ paramiko.SSHException.__init__(self, self.err)
+ self.hostname = hostname
+ self.fingerprint = fingerprint
+ self.expected_fingerprint = expected_fingerprint
+
+
+class HostKeyMatchPolicy(paramiko.AutoAddPolicy):
+ def __init__(self, expected_fingerprint):
+ self.expected_fingerprint = expected_fingerprint
+
+ def missing_host_key(self, client, hostname, key):
+ s = paramiko.util.hexlify(key.get_fingerprint())
+ fingerprint = ':'.join(re.findall('..', s))
+ if fingerprint.upper() == self.expected_fingerprint.upper():
+ paramiko.AutoAddPolicy.missing_host_key(self, client, hostname,
+ key)
+ else:
+ raise HostKeyMatchException(hostname, fingerprint,
+ self.expected_fingerprint)
+
+
+class GlusterSsh(paramiko.SSHClient):
+ def __init__(self, hostname, fingerprint, port=22, username=None,
+ password=None, pkey=None, key_filenames=[], timeout=None,
+ allow_agent=True, look_for_keys=True, compress=False):
+ paramiko.SSHClient.__init__(self)
+ key_file_list = []
+ if os.path.exists(_KEYFILE):
+ key_file_list.append(_KEYFILE)
+ key_file_list.append(key_filenames)
+ self.set_missing_host_key_policy(HostKeyMatchPolicy(fingerprint))
+ try:
+ paramiko.SSHClient.connect(self, hostname, port, username,
+ password, pkey, key_file_list, timeout,
+ allow_agent, look_for_keys, compress)
+ except socket.error, e:
+ err = ['%s: %s' % (hostname, e)]
+ raise ge.GlusterSshConnectionFailedException(err=err)
+ except HostKeyMatchException, e:
+ raise ge.GlusterSshHostKeyMismatchException(err=[e.err])
class GlusterApi(object):
@@ -287,6 +355,57 @@
status = self.svdsmProxy.glusterServicesGet(serviceNames)
return {'services': status}
+ def _validateSshConnection(self, hostname, fingerprint, username):
+ try:
+ ssh = GlusterSsh(hostname,
+ fingerprint,
+ username=username)
+ ssh.close()
+ return True
+ except paramiko.SSHException, e:
+ raise ge.GlusterSshHostKeyAuthException(err=[str(e)])
+
+ @exportAsVerb
+ def validateSshConnection(self, hostname, fingerprint, username,
+ options=None):
+ self._validateSshConnection(hostname, fingerprint, username)
+
+ @exportAsVerb
+ def setupSshConnection(self, hostname, fingerprint, username, password,
+ options=None):
+ rc, out, err = utils.execCmd([_sshKeyGenCommandPath.cmd, '-y', '-f',
+ _KEYFILE])
+ if rc != 0:
+ raise ge.GlusterSshPubKeyGenerationFailedException(rc=rc, err=err)
+
+ try:
+ ssh = GlusterSsh(hostname,
+ fingerprint,
+ username=username,
+ password=password)
+ c = ssh.get_transport().open_session()
+ c.exec_command(_SSH_COPY_ID_CMD)
+ stdin = c.makefile('wb')
+ stdout = c.makefile('rb')
+ stderr = c.makefile_stderr('rb')
+ stdin.write('\n'.join(out) + '\n')
+ stdin.flush()
+ stdin.close()
+ c.shutdown_write()
+ rc = c.recv_exit_status()
+ out = stdout.read().splitlines()
+ err = stderr.read().splitlines()
+ c.close()
+ ssh.close()
+ if rc != 0:
+ raise ge.GlusterSshSetupExecFailedException(rc=rc,
+ out=out,
+ err=err)
+ except paramiko.AuthenticationException, e:
+ raise ge.GlusterSshHostAuthException(err=[str(e)])
+
+ self._validateSshConnection(hostname, fingerprint, username)
+
def getGlusterMethods(gluster):
l = []
diff --git a/vdsm/gluster/exception.py b/vdsm/gluster/exception.py
index c569a9e..c9a0548 100644
--- a/vdsm/gluster/exception.py
+++ b/vdsm/gluster/exception.py
@@ -484,3 +484,39 @@
prefix = "%s: " % (action)
self.message = prefix + "Service action is not supported"
self.err = [self.message]
+
+
+# Ssh
+class GlusterSshException(GlusterException):
+ code = 4500
+ message = "Gluster ssh exception"
+
+
+class GlusterSshConnectionFailedException(GlusterSshException):
+ code = 4501
+ message = "SSH connection failed"
+
+
+class GlusterSshHostKeyMismatchException(GlusterSshException):
+ code = 4502
+ message = "Host key match failed"
+
+
+class GlusterSshHostKeyAuthException(GlusterSshException):
+ code = 4503
+ message = "SSH host key authentication failed"
+
+
+class GlusterSshHostAuthException(GlusterSshException):
+ code = 4504
+ message = "SSH host authentication failed"
+
+
+class GlusterSshPubKeyGenerationFailedException(GlusterSshException):
+ code = 4505
+ message = "SSH public key generation failed"
+
+
+class GlusterSshSetupExecFailedException(GlusterSshException):
+ code = 4506
+ message = "SSH key setup execution failed"
--
To view, visit http://gerrit.ovirt.org/18355
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ia6f040b1343998de4f8e28419c63e380240368db
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Timothy Asir <tjeyasin(a)redhat.com>
Gerrit-Reviewer: Ayal Baron <abaron(a)redhat.com>
Gerrit-Reviewer: Bala.FA <barumuga(a)redhat.com>
Gerrit-Reviewer: Dan Kenigsberg <danken(a)redhat.com>
Gerrit-Reviewer: Saggi Mizrahi <smizrahi(a)redhat.com>
8 years, 6 months
Change in vdsm[master]: keep pauseCode when migrating paused VM
by ahadas@redhat.com
Arik Hadas has uploaded a new change for review.
Change subject: keep pauseCode when migrating paused VM
......................................................................
keep pauseCode when migrating paused VM
This patch fixes a bug where the pauseCode is cleared on the destination
host when migrating a paused VM.
Change-Id: Iead0697bbebba3f261040221b04cd3745d8ef036
Signed-off-by: Arik Hadas <ahadas(a)redhat.com>
---
M vdsm/virt/vm.py
1 file changed, 4 insertions(+), 3 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/01/27801/1
diff --git a/vdsm/virt/vm.py b/vdsm/virt/vm.py
index 1bd9fed..8127402 100644
--- a/vdsm/virt/vm.py
+++ b/vdsm/virt/vm.py
@@ -1952,8 +1952,9 @@
self._ongoingCreations.release()
self.log.debug("_ongoingCreations released")
- if ('migrationDest' in self.conf or 'restoreState' in self.conf) \
- and self.lastStatus != vmstatus.DOWN:
+ migrating = 'migrationDest' in self.conf or \
+ 'restoreState' in self.conf
+ if migrating and self.lastStatus != vmstatus.DOWN:
self._waitForIncomingMigrationFinish()
self.lastStatus = vmstatus.UP
@@ -1961,7 +1962,7 @@
self.conf['pauseCode'] = self._initTimePauseCode
if self._initTimePauseCode == 'ENOSPC':
self.cont()
- else:
+ elif not migrating:
try:
with self._confLock:
del self.conf['pauseCode']
--
To view, visit http://gerrit.ovirt.org/27801
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Iead0697bbebba3f261040221b04cd3745d8ef036
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Arik Hadas <ahadas(a)redhat.com>
8 years, 6 months
Change in vdsm[master]: do not use OOP for padding snapshot's memory volume
by ahadas@redhat.com
Arik Hadas has uploaded a new change for review.
Change subject: do not use OOP for padding snapshot's memory volume
......................................................................
do not use OOP for padding snapshot's memory volume
Change-Id: I2a94354e188019f3afd209633979ec5a5b35293b
Signed-off-by: Arik Hadas <ahadas(a)redhat.com>
---
M vdsm/virt/vm.py
1 file changed, 3 insertions(+), 9 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/38/26538/1
diff --git a/vdsm/virt/vm.py b/vdsm/virt/vm.py
index 6711bc6..b8ce533 100644
--- a/vdsm/virt/vm.py
+++ b/vdsm/virt/vm.py
@@ -3543,15 +3543,9 @@
'_srcDomXML': self._dom.XMLDesc(0),
'elapsedTimeOffset': time.time() - self._startTime}
- def _padMemoryVolume(memoryVolPath, sdUUID):
- sdType = sd.name2type(
- self.cif.irs.getStorageDomainInfo(sdUUID)['info']['type'])
- if sdType in sd.FILE_DOMAIN_TYPES:
- if sdType == sd.NFS_DOMAIN:
- oop.getProcessPool(sdUUID).fileUtils. \
- padToBlockSize(memoryVolPath)
- else:
- fileUtils.padToBlockSize(memoryVolPath)
+ def _padMemoryVolume(memoryVolPath):
+ if not utils.isBlockDevice(memoryVolPath):
+ fileUtils.padToBlockSize(memoryVolPath)
snap = xml.dom.minidom.Element('domainsnapshot')
disks = xml.dom.minidom.Element('disks')
--
To view, visit http://gerrit.ovirt.org/26538
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I2a94354e188019f3afd209633979ec5a5b35293b
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Arik Hadas <ahadas(a)redhat.com>
8 years, 6 months
Change in vdsm[master]: clientIF: Remove unnecessary device is disk check
by sgotliv@redhat.com
Sergey Gotliv has uploaded a new change for review.
Change subject: clientIF: Remove unnecessary device is disk check
......................................................................
clientIF: Remove unnecessary device is disk check
Change-Id: I98317e805e6770df5dacd3237a383aaca78fde1e
Signed-off-by: Sergey Gotliv <sgotliv(a)redhat.com>
---
M vdsm/clientIF.py
1 file changed, 1 insertion(+), 1 deletion(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/63/22363/1
diff --git a/vdsm/clientIF.py b/vdsm/clientIF.py
index c083991..124f8e5 100644
--- a/vdsm/clientIF.py
+++ b/vdsm/clientIF.py
@@ -244,7 +244,7 @@
def prepareVolumePath(self, drive, vmId=None):
if type(drive) is dict:
# PDIV drive format
- if drive['device'] == 'disk' and vm.isVdsmImage(drive):
+ if vm.isVdsmImage(drive):
res = self.irs.prepareImage(
drive['domainID'], drive['poolID'],
drive['imageID'], drive['volumeID'])
--
To view, visit http://gerrit.ovirt.org/22363
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I98317e805e6770df5dacd3237a383aaca78fde1e
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Sergey Gotliv <sgotliv(a)redhat.com>
8 years, 6 months
Change in vdsm[master]: testSetupNetworksAddDelDhcp: Check that a DHCPv4 address is...
by osvoboda@redhat.com
Ondřej Svoboda has uploaded a new change for review.
Change subject: testSetupNetworksAddDelDhcp: Check that a DHCPv4 address is in range
......................................................................
testSetupNetworksAddDelDhcp: Check that a DHCPv4 address is in range
(At least) one address is now required to be in a range we know and control.
Also introduce range checking for IPv6.
Change-Id: Ied7e11e84b8f81c39c799fadfca06b7bde0a409c
Signed-off-by: Ondřej Svoboda <osvoboda(a)redhat.com>
---
M tests/functional/networkTests.py
1 file changed, 26 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/86/36186/1
diff --git a/tests/functional/networkTests.py b/tests/functional/networkTests.py
index f23f033..c09e122 100644
--- a/tests/functional/networkTests.py
+++ b/tests/functional/networkTests.py
@@ -22,6 +22,8 @@
import json
import signal
import netaddr
+import socket
+import struct
from hookValidation import ValidatesHook
from network.sourceroute import StaticSourceRoute
@@ -217,6 +219,20 @@
del attrs['d']
+# adapted from http://stackoverflow.com/questions/10558441/
+# inet-aton-similar-function-for-ipv6
+def _ipv4_to_int(addr):
+ addr = addr.split('/')[0] # remove the network prefix
+ return struct.unpack('>L', socket.inet_aton(addr))[0]
+
+
+def _ipv6_to_int(addr):
+ addr = addr.split('/')[0] # remove the network prefix
+ addr = socket.inet_pton(socket.AF_INET6, addr)
+ hi, lo = struct.unpack(">QQ", addr)
+ return (hi << 64) | lo
+
+
@expandPermutations
class NetworkTest(TestCaseBase):
@@ -367,6 +383,15 @@
def assertMtu(self, mtu, *elems):
for elem in elems:
self.assertEquals(int(mtu), int(self.vdsm_net.getMtu(elem)))
+
+ def assertAddrInRange(self, addrs, family=4,
+ range_from=DHCP_RANGE_FROM, range_to=DHCP_RANGE_TO):
+ addr_to_int = _ipv4_to_int if family == 4 else _ipv6_to_int
+ range_from = addr_to_int(range_from)
+ range_to = addr_to_int(range_to)
+ passes = (range_from <= addr_to_int(a) <= range_to for a in addrs)
+ self.assertTrue(any(passes), 'no address {0} in expected range'.format(
+ addrs))
def testLegacyBonds(self):
if not (caps.getos() in (caps.OSName.RHEVH, caps.OSName.RHEL)
@@ -1773,6 +1798,7 @@
device_name = right
if dhcpv4:
+ self.assertAddrInRange(test_net['ipv4addrs'])
# TODO: source routing not ready for IPv6
ip_addr = test_net['addr']
self.assertSourceRoutingConfiguration(device_name, ip_addr)
--
To view, visit http://gerrit.ovirt.org/36186
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ied7e11e84b8f81c39c799fadfca06b7bde0a409c
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Ondřej Svoboda <osvoboda(a)redhat.com>
8 years, 6 months
Change in vdsm[master]: Network: Flush all configurators before restoring networks
by ibarkan@redhat.com
Ido Barkan has uploaded a new change for review.
Change subject: Network: Flush all configurators before restoring networks
......................................................................
Network: Flush all configurators before restoring networks
Right now, when there are many networks, the network service takes time to
start up (think DHCP). After it is up, VDSM removes all networks and
configures its own persistent networks, which can also take quite some time.
This patch teaches 'vdsm-restore-net-config' to conditionally flush the
network configuration and splits this behaviour into a unit that systemd will
start before the network service.
systemd will now manage the following chain of tasks:
1. vdsm-network-cleanup.service will clean up, before the network configuration
daemons run on boot, any system files that configurators may have written, so
those daemons lose no time configuring vdsm networks (and conflicts are avoided).
2. after libvirt is up, reconfigure the persistent network configuration of vdsm
using the configured net_configurator.
3. start vdsm and super-vdsm
Note that with this change it would be possible to remove the After=
dependency on vdsm-network.service from vdsmd.service, so that vdsm would
start in parallel with the networks being set up. This could be quite useful,
as the engine could then communicate with vdsm as soon as the management
network is restored. However, for now, the engine would see the host as
unsynchronized, and we have to think carefully about whether that is okay
and how long it would take.
Change-Id: I9e3d0fb35d608a8b019ad80f7e40f4399e7db479
Signed-off-by: Ido Barkan <ibarkan(a)redhat.com>
---
M .gitignore
M init/systemd/Makefile.am
A init/systemd/vdsm-network-cleanup.service.in.py
M init/systemd/vdsm-network.service.in
M init/sysvinit/vdsmd.init.in
M lib/vdsm/tool/restore_nets.py
M vdsm.spec.in
M vdsm/network/configurators/__init__.py
M vdsm/vdsm-restore-net-config
9 files changed, 56 insertions(+), 12 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/67/38767/1
diff --git a/.gitignore b/.gitignore
index 54cbc46..4245b44 100644
--- a/.gitignore
+++ b/.gitignore
@@ -26,6 +26,7 @@
init/systemd/systemd-vdsmd
init/systemd/vdsm-tmpfiles.d.conf
init/systemd/vdsmd.service
+init/systemd/vdsm-network-cleanup.service
init/systemd/vdsm-network.service
init/sysvinit/supervdsmd.init
init/sysvinit/vdsmd.init
diff --git a/init/systemd/Makefile.am b/init/systemd/Makefile.am
index 07c4ae8..000e267 100644
--- a/init/systemd/Makefile.am
+++ b/init/systemd/Makefile.am
@@ -37,6 +37,7 @@
supervdsmd.service.in \
systemd-vdsmd.in \
vdsmd.service.in \
+ vdsm-network-cleanup.service.in \
vdsm-network.service.in \
vdsm-tmpfiles.d.conf.in \
$(NULL)
diff --git a/init/systemd/vdsm-network-cleanup.service.in.py b/init/systemd/vdsm-network-cleanup.service.in.py
new file mode 100644
index 0000000..b2c8dab
--- /dev/null
+++ b/init/systemd/vdsm-network-cleanup.service.in.py
@@ -0,0 +1,13 @@
+[Unit]
+Description=Virtual Desktop Server Manager network system files cleanup
+Wants=network.target
+Before=network.target network.service NetworkManager.service systemd-networkd.service
+
+[Service]
+Type=oneshot
+EnvironmentFile=-/etc/sysconfig/vdsm
+ExecStart=@VDSMDIR@/vdsm-restore-net-config flush
+RemainAfterExit=yes
+
+[Install]
+WantedBy=multi-user.target
\ No newline at end of file
diff --git a/init/systemd/vdsm-network.service.in b/init/systemd/vdsm-network.service.in
index 9eb3e5e..9570ef5 100644
--- a/init/systemd/vdsm-network.service.in
+++ b/init/systemd/vdsm-network.service.in
@@ -1,7 +1,7 @@
[Unit]
Description=Virtual Desktop Server Manager network restoration
Wants=network.target
-Requires=libvirtd.service
+Requires=vdsm-network-cleanup.service libvirtd.service
After=libvirtd.service
[Service]
@@ -9,7 +9,7 @@
EnvironmentFile=-/etc/sysconfig/vdsm
ExecStartPre=@BINDIR@/vdsm-tool --vvverbose --append --logfile=@VDSMLOGDIR(a)/upgrade.log upgrade-unified-persistence
ExecStartPre=@BINDIR@/vdsm-tool --vvverbose --append --logfile=@VDSMLOGDIR(a)/upgrade.log upgrade-3.0.0-networks
-ExecStart=@VDSMDIR@/vdsm-restore-net-config
+ExecStart=@VDSMDIR@/vdsm-restore-net-config restore --no-flush
RemainAfterExit=yes
[Install]
diff --git a/init/sysvinit/vdsmd.init.in b/init/sysvinit/vdsmd.init.in
index 1f8dc64..409ee48 100755
--- a/init/sysvinit/vdsmd.init.in
+++ b/init/sysvinit/vdsmd.init.in
@@ -142,7 +142,7 @@
}
restore_nets(){
- "@PYTHON@" "@VDSMDIR@/vdsm-restore-net-config" || return 1
+ "@PYTHON@" "@VDSMDIR@/vdsm-restore-net-config" restore || return 1
return 0
}
diff --git a/lib/vdsm/tool/restore_nets.py b/lib/vdsm/tool/restore_nets.py
index 99ea9f2..861bf01 100644
--- a/lib/vdsm/tool/restore_nets.py
+++ b/lib/vdsm/tool/restore_nets.py
@@ -38,7 +38,7 @@
def restore():
rc, out, err = utils.execCmd(
- [os.path.join(P_VDSM, 'vdsm-restore-net-config')], raw=True)
+ [os.path.join(P_VDSM, 'vdsm-restore-net-config'), 'restore'], raw=True)
sys.stdout.write(out)
sys.stderr.write(err)
if rc != 0:
diff --git a/vdsm.spec.in b/vdsm.spec.in
index 819178c..e369b33 100644
--- a/vdsm.spec.in
+++ b/vdsm.spec.in
@@ -762,6 +762,7 @@
%if 0%{?with_systemd}
install -Dm 0755 init/systemd/systemd-vdsmd %{buildroot}/usr/lib/systemd/systemd-vdsmd
install -Dm 0644 init/systemd/vdsmd.service %{buildroot}%{_unitdir}/vdsmd.service
+install -Dm 0644 init/systemd/vdsm-network-cleanup.service %{buildroot}%{_unitdir}/vdsm-network-cleanup.service
install -Dm 0644 init/systemd/vdsm-network.service %{buildroot}%{_unitdir}/vdsm-network.service
install -Dm 0644 init/systemd/supervdsmd.service %{buildroot}%{_unitdir}/supervdsmd.service
@@ -868,6 +869,7 @@
/bin/systemctl restart systemd-modules-load.service >/dev/null 2>&1 || :
if [ "$1" -eq 1 ] ; then
/bin/systemctl enable vdsmd.service >/dev/null 2>&1 || :
+ /bin/systemctl enable vdsm-network-cleanup.service >/dev/null 2>&1 || :
/bin/systemctl enable vdsm-network.service >/dev/null 2>&1 || :
/bin/systemctl enable supervdsmd.service >/dev/null 2>&1 || :
fi
@@ -905,6 +907,7 @@
%if 0%{?with_systemd}
%systemd_preun vdsmd.service
%systemd_preun vdsm-network.service
+%systemd_preun vdsm-network-cleanup.service
%systemd_preun supervdsmd.service
%else
if [ "$1" -eq 0 ]; then
@@ -912,6 +915,7 @@
/bin/systemctl --no-reload disable supervdsmd.service > /dev/null 2>&1 || :
/bin/systemctl stop vdsmd.service > /dev/null 2>&1 || :
/bin/systemctl stop vdsm-network.service > /dev/null 2>&1 || :
+ /bin/systemctl stop vdsm-network-cleanup.service > /dev/null 2>&1 || :
/bin/systemctl stop supervdsmd.service > /dev/null 2>&1 || :
fi
exit 0
@@ -1015,6 +1019,7 @@
%if 0%{?with_systemd}
/usr/lib/systemd/systemd-vdsmd
%{_unitdir}/vdsmd.service
+%{_unitdir}/vdsm-network-cleanup.service
%{_unitdir}/vdsm-network.service
%{_unitdir}/supervdsmd.service
%else
diff --git a/vdsm/network/configurators/__init__.py b/vdsm/network/configurators/__init__.py
index 39ad9a6..ce6ce05 100644
--- a/vdsm/network/configurators/__init__.py
+++ b/vdsm/network/configurators/__init__.py
@@ -70,7 +70,7 @@
return RunningConfig().diffFrom(self.runningConfig)
def flush(self):
- libvirt.flush()
+ pass
def configureBridge(self, bridge, **opts):
raise NotImplementedError
diff --git a/vdsm/vdsm-restore-net-config b/vdsm/vdsm-restore-net-config
index 8ffb880..bcd3924 100755
--- a/vdsm/vdsm-restore-net-config
+++ b/vdsm/vdsm-restore-net-config
@@ -28,6 +28,7 @@
from vdsm.config import config
from vdsm import netinfo
from vdsm.constants import P_VDSM_RUN
+from vdsm.tool import unified_persistence, upgrade
# Ifcfg persistence restoration
from network.configurators import ifcfg
@@ -35,6 +36,7 @@
# Unified persistence restoration
from network.api import setupNetworks
from network import configurators
+from network.configurators import libvirt
from vdsm.netconfpersistence import RunningConfig, PersistentConfig
import pkgutil
@@ -46,7 +48,7 @@
configWriter.restorePersistentBackup()
-def unified_restoration():
+def unified_restoration(flush_configurators):
"""
Builds a setupNetworks command from the persistent configuration to set it
as running configuration.
@@ -65,8 +67,9 @@
# Flush vdsm configurations left-overs from any configurator on the system
# so that changes of configurator and persistence system are smooth.
- for configurator_cls in _get_all_configurators():
- configurator_cls().flush()
+ if flush_configurators:
+ _configurator_flush()
+ libvirt.flush()
# Restore non-VDSM network devices (BZ#1188251)
configWriter = ifcfg.ConfigWriter()
@@ -143,17 +146,27 @@
os.utime(file_path, None)
-def restore(force_restore=False):
+def restore(force_restore=False, flush_configurators=True):
if not force_restore and _nets_already_restored(_NETS_RESTORED_MARK):
logging.info('networks already restored. doing nothing.')
return
if config.get('vars', 'net_persistence') == 'unified':
- unified_restoration()
+ unified_restoration(flush_configurators)
else:
ifcfg_restoration()
touch_file(_NETS_RESTORED_MARK)
+
+
+def _configurator_flush():
+ """Flush vdsm configurations left-overs from any configurator on the system
+ so that changes of configurator and persistence system are smooth. Only
+ done if upgrade to unified persistence is not pending"""
+ if (not upgrade._upgrade_needed(
+ unified_persistence.UpgradeUnifiedPersistence)):
+ for configurator_cls in _get_all_configurators():
+ configurator_cls().flush()
if __name__ == '__main__':
@@ -166,14 +179,25 @@
level=logging.DEBUG)
logging.error('Could not init proper logging', exc_info=True)
- parser = optparse.OptionParser("usage: %prog [options] nets_restored_mark")
+ parser = optparse.OptionParser("usage: %prog restore|flush [options]")
force_option_help = "the restore action first tests for an existence of " \
"a mark that is made after the last successful " \
"restore action. Unless this option is used, " \
"restore will be a no-op if this mark exists."
parser.add_option('--force', action='store_true', default=False,
help=force_option_help)
+ parser.add_option('--no-flush', dest='flush_configurators', default=True,
+ action='store_false')
options, args = parser.parse_args()
- restore(options.force)
+ if len(args) != 1 or args[0] not in ['restore', 'flush']:
+ parser.error("Wrong number of arguments. only restore or flush are "
+ "allowed")
+ arg, = args
+ if arg == 'restore':
+ restore(options.force, options.flush_configurators)
+ elif arg == 'flush':
+ if not options.flush_configurators:
+ parser.error('flush cannot be used with --no-flush')
+ _configurator_flush()
--
To view, visit https://gerrit.ovirt.org/38767
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I9e3d0fb35d608a8b019ad80f7e40f4399e7db479
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Ido Barkan <ibarkan(a)redhat.com>
8 years, 6 months