Change in vdsm[master]: GuestIF Refactoring
by Vinzenz Feenstra
Vinzenz Feenstra has uploaded a new change for review.
Change subject: GuestIF Refactoring
......................................................................
GuestIF Refactoring
Change-Id: Ib357d770a26ef1dc80b89a32bf6808551a7d622d
Signed-off-by: Vinzenz Feenstra <vfeenstr(a)redhat.com>
---
M vdsm/guestIF.py
1 file changed, 114 insertions(+), 76 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/18/24618/1
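For reviewers: the patch replaces the long if/elif chain (removed in the
second hunk) with attribute-based dispatch. A minimal standalone sketch
of the pattern, independent of vdsm:

    class Handler(object):
        def __call__(self, message, args):
            # 'session-lock' -> method name 'session_lock'
            method = getattr(self, message.replace('-', '_'), None)
            if method is None:
                raise KeyError('unsupported message %r' % message)
            return method(args)

        def session_lock(self, args):
            return 'Locked'

    h = Handler()
    assert h('session-lock', {}) == 'Locked'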
diff --git a/vdsm/guestIF.py b/vdsm/guestIF.py
index 229a55d..96ad68c 100644
--- a/vdsm/guestIF.py
+++ b/vdsm/guestIF.py
@@ -39,6 +39,115 @@
union(set(range(0x86, 0x9F + 1)))
+class UnknownMessageError(Exception):
+ def __init__(self, message, args):
+ Exception.__init__(self, 'Unknown or unsupported guest agent message '
+ '"%s" received with args "%s"' % (message,
+ str(args)))
+
+
+class MessageHandler(object):
+ def __init__(self, agent):
+ self.log = agent.log
+ self._agent = agent
+
+ def __call__(self, message, args):
+ handler = getattr(self, message.replace('-', '_'), None)
+ if handler:
+ handler(args)
+ else:
+ raise UnknownMessageError(message, args)
+
+ def applications(self, args):
+ self._agent.guestInfo['appsList'] = args['applications']
+
+ def fqdn(self, args):
+ self._agent.guestInfo['guestFQDN'] = args['fqdn']
+
+ def host_name(self, args):
+ self._agent.guestInfo['guestName'] = args['name']
+
+ def os_version(self, args):
+ self._agent.guestInfo['guestOs'] = args['version']
+
+ def session_lock(self, args):
+ self._agent.guestInfo['session'] = 'Locked'
+
+ def session_logoff(self, args):
+ self._agent.guestInfo['session'] = 'LoggedOff'
+
+ def session_logon(self, args):
+ self._agent.guestInfo['session'] = 'UserLoggedOn'
+
+ def session_unlock(self, args):
+ self._agent.guestInfo['session'] = 'Active'
+
+ def session_shutdown(self, args):
+ self.log.debug('Guest system shuts down')
+
+ def session_startup(self, args):
+ self.log.debug('Guest system started or restarted')
+
+ def uninstalled(self, args):
+ self.log.debug('Guest agent was uninstalled')
+ self._agent.guestInfo['appsList'] = []
+
+ def heartbeat(self, args):
+ self._agent.guestStatus = 'Up'
+ self._agent.guestInfo['memUsage'] = int(args['free-ram'])
+ # ovirt-guest-agent reports the following fields in 'memory-stat':
+ # 'mem_total', 'mem_free', 'mem_unused', 'swap_in', 'swap_out',
+ # 'pageflt' and 'majflt'
+ if 'memory-stat' in args:
+ for (k, v) in args['memory-stat'].iteritems():
+ # Convert the value to string since 64-bit integer is not
+ # supported in XMLRPC
+ self._agent.guestInfo['memoryStats'][k] = str(v)
+
+ if 'apiVersion' in args:
+ # The guest agent supports API Versioning
+ self._agent._handleAPIVersion(args['apiVersion'])
+ elif self._agent.effectiveApiVersion != _IMPLICIT_API_VERSION_ZERO:
+ # Older versions of the guest agent (before the introduction
+ # of API versioning) do not report this field
+ # Disable the API if not already disabled (e.g. after
+ # downgrade of the guest agent)
+ self.log.debug("API versioning no longer reported by guest.")
+ self._agent.effectiveApiVersion = _IMPLICIT_API_VERSION_ZERO
+
+ def network_interfaces(self, args):
+ interfaces = []
+ old_ips = ''
+ for iface in args['interfaces']:
+ iface['inet'] = iface.get('inet', [])
+ iface['inet6'] = iface.get('inet6', [])
+ interfaces.append(iface)
+ # Provide the old information which includes
+ # only the IP addresses.
+ old_ips += ' '.join(iface['inet']) + ' '
+ self._agent.guestInfo['netIfaces'] = interfaces
+ self._agent.guestInfo['guestIPs'] = old_ips.strip()
+
+ def active_user(self, args):
+ currentUser = args['name']
+ if ((currentUser != self._agent.guestInfo['username']) and
+ not (currentUser == 'Unknown' and
+ self._agent.guestInfo['username'] == 'None')):
+ self._agent.guestInfo['username'] = currentUser
+ self._agent.guestInfo['lastLogin'] = time.time()
+ self.log.debug("username: %s", repr(self.guestInfo['username']))
+
+ def disks_usage(self, args):
+ disks = []
+ for disk in args['disks']:
+ # Converting to string because XML-RPC doesn't support 64-bit
+ # integers.
+ disk['total'] = str(disk['total'])
+ disk['used'] = str(disk['used'])
+ disks.append(disk)
+ self._agent.guestInfo['disksUsage'] = disks
+
+
def _filterXmlChars(u):
"""
The set of characters allowed in XML documents is described in
@@ -109,6 +218,7 @@
def __init__(self, socketName, channelListener, log, user='Unknown',
ips='', connect=True):
+ self.handler = MessageHandler(self)
self.effectiveApiVersion = _IMPLICIT_API_VERSION_ZERO
self.log = log
self._socketName = socketName
@@ -223,82 +333,10 @@
self.log.log(logging.TRACE, "Guest's message %s: %s", message, args)
if self.guestStatus is None:
self.guestStatus = 'Up'
- if message == 'heartbeat':
- self.guestStatus = 'Up'
- self.guestInfo['memUsage'] = int(args['free-ram'])
- # ovirt-guest-agent reports the following fields in 'memory-stat':
- # 'mem_total', 'mem_free', 'mem_unused', 'swap_in', 'swap_out',
- # 'pageflt' and 'majflt'
- if 'memory-stat' in args:
- for (k, v) in args['memory-stat'].iteritems():
- # Convert the value to string since 64-bit integer is not
- # supported in XMLRPC
- self.guestInfo['memoryStats'][k] = str(v)
-
- if 'apiVersion' in args:
- # The guest agent supports API Versioning
- self._handleAPIVersion(args['apiVersion'])
- elif self.effectiveApiVersion != _IMPLICIT_API_VERSION_ZERO:
- # Older versions of the guest agent (before the introduction
- # of API versioning) do not report this field
- # Disable the API if not already disabled (e.g. after
- # downgrade of the guest agent)
- self.log.debug("API versioning no longer reported by guest.")
- self.effectiveApiVersion = _IMPLICIT_API_VERSION_ZERO
- elif message == 'host-name':
- self.guestInfo['guestName'] = args['name']
- elif message == 'os-version':
- self.guestInfo['guestOs'] = args['version']
- elif message == 'network-interfaces':
- interfaces = []
- old_ips = ''
- for iface in args['interfaces']:
- iface['inet'] = iface.get('inet', [])
- iface['inet6'] = iface.get('inet6', [])
- interfaces.append(iface)
- # Provide the old information which includes
- # only the IP addresses.
- old_ips += ' '.join(iface['inet']) + ' '
- self.guestInfo['netIfaces'] = interfaces
- self.guestInfo['guestIPs'] = old_ips.strip()
- elif message == 'applications':
- self.guestInfo['appsList'] = args['applications']
- elif message == 'active-user':
- currentUser = args['name']
- if ((currentUser != self.guestInfo['username']) and
- not (currentUser == 'Unknown' and
- self.guestInfo['username'] == 'None')):
- self.guestInfo['username'] = currentUser
- self.guestInfo['lastLogin'] = time.time()
- self.log.debug("username: %s", repr(self.guestInfo['username']))
- elif message == 'session-logon':
- self.guestInfo['session'] = "UserLoggedOn"
- elif message == 'session-lock':
- self.guestInfo['session'] = "Locked"
- elif message == 'session-unlock':
- self.guestInfo['session'] = "Active"
- elif message == 'session-logoff':
- self.guestInfo['session'] = "LoggedOff"
- elif message == 'uninstalled':
- self.log.debug("RHEV agent was uninstalled.")
- self.guestInfo['appsList'] = []
- elif message == 'session-startup':
- self.log.debug("Guest system is started or restarted.")
- elif message == 'fqdn':
- self.guestInfo['guestFQDN'] = args['fqdn']
- elif message == 'session-shutdown':
- self.log.debug("Guest system shuts down.")
- elif message == 'disks-usage':
- disks = []
- for disk in args['disks']:
- # Converting to string because XML-RPC doesn't support 64-bit
- # integers.
- disk['total'] = str(disk['total'])
- disk['used'] = str(disk['used'])
- disks.append(disk)
- self.guestInfo['disksUsage'] = disks
- else:
- self.log.error('Unknown message type %s', message)
+ try:
+ self.handler(message, args)
+ except UnknownMessageError as e:
+ self.log.error(e)
def stop(self):
self._stopped = True
--
To view, visit http://gerrit.ovirt.org/24618
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ib357d770a26ef1dc80b89a32bf6808551a7d622d
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Vinzenz Feenstra <vfeenstr(a)redhat.com>
Change in vdsm[master]: [WIP]vdsm: add support for S3/S4 suspend calls
by mpoledni@redhat.com
Martin Polednik has uploaded a new change for review.
Change subject: [WIP]vdsm: add support for S3/S4 suspend calls
......................................................................
[WIP]vdsm: add support for S3/S4 suspend calls
Change-Id: Ic30016c5cd555f5771dde8db3f1340e1c11b3da7
Signed-off-by: Martin Polednik <mpoledni(a)redhat.com>
---
M vdsm/API.py
M vdsm/BindingXMLRPC.py
M vdsm/guestIF.py
M vdsm_api/vdsmapi-schema.json
4 files changed, 58 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/89/19389/1
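A sketch of how a client could drive the new verb once exposed over
XML-RPC (hypothetical host and UUID; the call shape mirrors how
vdsClient invokes the other desktop* verbs, and 54321 is vdsm's usual
port):

    import xmlrpclib  # Python 2, as vdsm used at the time
    s = xmlrpclib.ServerProxy('https://my-host:54321')
    # 'mode' is one of the guestSuspendMode values from the schema below
    res = s.desktopSuspend('11111111-2222-3333-4444-555555555555', 'ram')
    print res['status']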
diff --git a/vdsm/API.py b/vdsm/API.py
index 37bb908..96119ed 100644
--- a/vdsm/API.py
+++ b/vdsm/API.py
@@ -296,6 +296,20 @@
else:
return errCode['nonresp']
+ def desktopSuspend(self, mode):
+ """
+ Sleep the guest operating system
+ """
+ try:
+ v = self._cif.vmContainer[self._UUID]
+ except KeyError:
+ return errCode['noVM']
+ v.guestAgent.desktopSuspend(mode)
+ if v.guestAgent.isResponsive():
+ return {'status': doneCode}
+ else:
+ return errCode['nonresp']
+
def desktopSendHcCommand(self, message):
"""
Send a command to the guest agent (deprecated).
diff --git a/vdsm/BindingXMLRPC.py b/vdsm/BindingXMLRPC.py
index fb65ad4..046bb0c 100644
--- a/vdsm/BindingXMLRPC.py
+++ b/vdsm/BindingXMLRPC.py
@@ -352,6 +352,10 @@
vm = API.VM(vmId)
return vm.desktopLogoff(force)
+ def vmDesktopSuspend(self, vmId, mode):
+ vm = API.VM(vmId)
+ return vm.desktopSuspend(mode)
+
def vmDesktopLock(self, vmId):
vm = API.VM(vmId)
return vm.desktopLock()
@@ -836,6 +840,7 @@
(self.vmMigrationCreate, 'migrationCreate'),
(self.vmDesktopLogin, 'desktopLogin'),
(self.vmDesktopLogoff, 'desktopLogoff'),
+ (self.vmDesktopSuspend, 'desktopSuspend'),
(self.vmDesktopLock, 'desktopLock'),
(self.vmDesktopSendHcCommand, 'sendHcCmdToDesktop'),
(self.vmHibernate, 'hibernate'),
diff --git a/vdsm/guestIF.py b/vdsm/guestIF.py
index 6f09ef1..2bb654b 100644
--- a/vdsm/guestIF.py
+++ b/vdsm/guestIF.py
@@ -299,6 +299,15 @@
except:
self.log.error("desktopLogoff failed", exc_info=True)
+ def desktopSuspend(self, mode):
+ try:
+ self.log.debug('desktopSuspend called')
+ cmds = {'ram': 'guest-suspend-ram',
+ 'hybrid': 'guest-suspend-hybrid',
+ 'disk': 'guest-suspend-disk'}
+ self._forward(cmds[mode], {'success_response': 'yes'})
+ except:
+ self.log.debug('desktopSuspend failed', exc_info=True)
+
def desktopShutdown(self, timeout, msg):
try:
self.log.debug("desktopShutdown called")
diff --git a/vdsm_api/vdsmapi-schema.json b/vdsm_api/vdsmapi-schema.json
index 27c12c1..bc54041 100644
--- a/vdsm_api/vdsmapi-schema.json
+++ b/vdsm_api/vdsmapi-schema.json
@@ -5079,6 +5079,36 @@
'data': {'vmID': 'UUID', 'force': 'bool'}}
##
+# @guestSuspendMode
+#
+# Enumeration of supported suspend modes
+#
+# @ram: S3 sleep
+#
+# @disk: S4 sleep
+#
+# @hybrid: Combination of @ram and @disk
+#
+# Since: 4.12.0
+##
+{'enum': 'guestSuspendMode', 'data': ['ram', 'disk', 'hybrid']}
+
+
+##
+# @VM.desktopSuspend:
+#
+# Suspend the guest operating system
+#
+# @vmID: The UUID of the VM
+#
+# @mode: Type of suspension
+#
+# Since: 4.12.0
+##
+{'command': {'class': 'VM', 'name': 'desktopSuspend'},
+ 'data': {'vmID': 'UUID', 'mode': 'guestSuspendMode'}}
+
+##
# @VM.desktopSendHcCommand:
#
# Send an arbitrary command to the guest agent.
--
To view, visit http://gerrit.ovirt.org/19389
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ic30016c5cd555f5771dde8db3f1340e1c11b3da7
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Martin Polednik <mpoledni(a)redhat.com>
Change in vdsm[master]: vdsm: add support for hot(un)plug of pci-passthrough devices
by mpoledni@redhat.com
Martin Polednik has uploaded a new change for review.
Change subject: vdsm: add support for hot(un)plug of pci-passthrough devices
......................................................................
vdsm: add support for hot(un)plug of pci-passthrough devices
Hot(un)plug ability for pci-passthrough devices is extremely important
in order to allow a hotunplug/migrate/hotplug workflow, as a VM
cannot be migrated with a pci device attached.
This patch implements the ability by adding new API verbs, hotplugHostdev
and hotunplugHostdev, which attempt to append/remove the device in the
domain XML and report back the state.
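For illustration, a generic libvirt PCI hostdev fragment of the kind
attachDevice/detachDevice consume (not necessarily the exact XML that
HostDevice.getXML() will produce; the address values are made up):

    <hostdev mode='subsystem' type='pci' managed='yes'>
      <source>
        <address domain='0x0000' bus='0x06' slot='0x02' function='0x0'/>
      </source>
    </hostdev>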
Change-Id: I8fbf4a1d62789d9404e5977eb7eb01b17a1a43fb
Signed-off-by: Martin Polednik <mpoledni(a)redhat.com>
---
M client/vdsClient.py
M vdsm/API.py
M vdsm/BindingXMLRPC.py
M vdsm/vm.py
M vdsm_api/vdsmapi-schema.json
5 files changed, 166 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/23/22523/1
diff --git a/client/vdsClient.py b/client/vdsClient.py
index d549500..f00180e 100644
--- a/client/vdsClient.py
+++ b/client/vdsClient.py
@@ -245,6 +245,14 @@
params = {'vmId': args[0], 'drive': drive}
return self.ExecAndExit(self.s.hotunplugDisk(params))
+ def hotplugHostdev(self, args):
+ params = {'vmId': args[0], 'hostdevName': args[1]}
+ return self.ExecAndExit(self.s.hotplugHostdev(params))
+
+ def hotunplugHostdev(self, args):
+ params = {'vmId': args[0], 'hostdevName': args[1]}
+ return self.ExecAndExit(self.s.hotunplugHostdev(params))
+
def do_changeCD(self, args):
vmId = args[0]
file = self._parseDriveSpec(args[1])
diff --git a/vdsm/API.py b/vdsm/API.py
index 44d5817..51eb506 100644
--- a/vdsm/API.py
+++ b/vdsm/API.py
@@ -456,6 +456,40 @@
return curVm.hotunplugDisk(params)
+ def hotplugHostdev(self, params):
+ try:
+ utils.validateMinimalKeySet(params, ('vmId', 'hostdevName'))
+ except ValueError:
+ self.log.error('Missing one of required parameters: vmId, '
+ 'hostdevName')
+ return {'status': {'code': errCode['MissParam']['status']['code'],
+ 'message': 'Missing one of required '
+ 'parameters: vmId, hostdevName'}}
+ try:
+ curVm = self._cif.vmContainer[self._UUID]
+ except KeyError:
+ self.log.warning("vm %s doesn't exist", self._UUID)
+ return errCode['noVM']
+
+ return curVm.hotplugHostdev(params)
+
+ def hotunplugHostdev(self, params):
+ try:
+ utils.validateMinimalKeySet(params, ('vmId', 'hostdevName'))
+ except ValueError:
+ self.log.error('Missing one of required parameters: vmId, '
+ 'hostdevName')
+ return {'status': {'code': errCode['MissParam']['status']['code'],
+ 'message': 'Missing one of required '
+ 'parameters: vmId, hostdevName'}}
+ try:
+ curVm = self._cif.vmContainer[self._UUID]
+ except KeyError:
+ self.log.warning("vm %s doesn't exist", self._UUID)
+ return errCode['noVM']
+
+ return curVm.hotunplugHostdev(params)
+
def migrate(self, params):
"""
Migrate a VM to a remote host.
diff --git a/vdsm/BindingXMLRPC.py b/vdsm/BindingXMLRPC.py
index 5bcd84c..847ceaf 100644
--- a/vdsm/BindingXMLRPC.py
+++ b/vdsm/BindingXMLRPC.py
@@ -279,6 +279,14 @@
vm = API.VM(params['vmId'])
return vm.hotunplugNic(params)
+ def vmHotplugHostdev(self, params):
+ vm = API.VM(params['vmId'])
+ return vm.hotplugHostdev(params)
+
+ def vmHotunplugHostdev(self, params):
+ vm = API.VM(params['vmId'])
+ return vm.hotunplugHostdev(params)
+
def vmUpdateDevice(self, vmId, params):
vm = API.VM(vmId)
return vm.vmUpdateDevice(params)
@@ -861,6 +869,8 @@
(self.vmHotunplugDisk, 'hotunplugDisk'),
(self.vmHotplugNic, 'hotplugNic'),
(self.vmHotunplugNic, 'hotunplugNic'),
+ (self.vmHotplugHostdev, 'hotplugHostdev'),
+ (self.vmHotunplugHostdev, 'hotunplugHostdev'),
(self.vmUpdateDevice, 'vmUpdateDevice'))
def getIrsMethods(self):
diff --git a/vdsm/vm.py b/vdsm/vm.py
index 90a6c0d..224bf7b 100644
--- a/vdsm/vm.py
+++ b/vdsm/vm.py
@@ -3183,6 +3183,84 @@
return {'status': doneCode, 'vmList': self.status()}
+ def hotplugHostdev(self, params):
+ hostdevName = params['hostdevName']
+ hostdev = HostDevice(self.conf, self.log, **hostdevName)
+ hostdevXML = hostdev.getXML().toprettyxml(encoding='utf-8')
+ hostdev._deviceXML = hostdevXML
+ self.log.debug("Hotplug hostdev xml: %s", hostdevXML)
+
+ try:
+ self._dom.attachDevice(hostdevXML)
+ except libvirt.libvirtError as e:
+ self.log.error("Hotplug failed", exc_info=True)
+ if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
+ return errCode['noVM']
+ return {'status': {'code': errCode['hotplugNic']['status']['code'],
+ 'message': e.message}}
+ else:
+ # FIXME! We may have a problem here if vdsm dies right after
+ # we sent command to libvirt and before save conf. In this case
+ # we will gather almost all needed info about this NIC from
+ # the libvirt during recovery process.
+ self._devices[HOSTDEV_DEVICES].append(hostdev)
+ with self._confLock:
+ self.conf['devices'].append(hostdev)
+ self.saveState()
+
+ return {'status': doneCode, 'vmList': self.status()}
+
+ def hotunplugHostdev(self, params):
+ hostdevName = params['hostdevName']
+
+ # Find hostdev object in vm's hostdev list
+ hostdev = None
+ for dev in self._devices[HOSTDEV_DEVICES][:]:
+ if dev.name == hostdevName:
+ hostdev = dev
+ break
+
+ if hostdev:
+ hostdevXML = hostdev.getXML().toprettyxml(encoding='utf-8')
+ self.log.debug("Hotunplug hostdev xml: %s", hostdevXML)
+ self._devices[HOSTDEV_DEVICES].remove(hostdev)
+ else:
+ self.log.error("Hotunplug hostdev failed - hostdev not found: %s",
+ hostdevName)
+ return {'status': {'code': errCode['hotunplugNic']
+ ['status']['code'],
+ 'message': "NIC not found"}}
+
+ hostdevDev = None
+ for dev in self.conf['devices'][:]:
+ if (dev['type'] == HOSTDEV_DEVICES and
+ dev['name'] == hostdevName):
+ hostdevDev = dev
+ with self._confLock:
+ self.conf['devices'].remove(dev)
+ break
+
+ self.saveState()
+
+ try:
+ self._dom.detachDevice(hostdevXML)
+ except libvirt.libvirtError as e:
+ self.log.error("Hotunplug failed", exc_info=True)
+ if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
+ return errCode['noVM']
+ # Restore hostdev device in vm's conf and _devices
+ if hostdevDev:
+ with self._confLock:
+ self.conf['devices'].append(hostdevDev)
+ if hostdev:
+ self._devices[HOSTDEV_DEVICES].append(hostdev)
+ self.saveState()
+ return {
+ 'status': {'code': errCode['hotunplugNic']['status']['code'],
+ 'message': e.message}}
+
+ return {'status': doneCode, 'vmList': self.status()}
+
def _lookupDeviceByAlias(self, devType, alias):
for dev in self._devices[devType][:]:
if dev.alias == alias:
diff --git a/vdsm_api/vdsmapi-schema.json b/vdsm_api/vdsmapi-schema.json
index 1d4e499..85f2b28 100644
--- a/vdsm_api/vdsmapi-schema.json
+++ b/vdsm_api/vdsmapi-schema.json
@@ -5793,6 +5793,42 @@
'returns': 'VmDefinition'}
##
+# @VM.hotplugHostdev:
+#
+# Add a new host device to a running VM.
+#
+# @vmID: The UUID of the VM
+#
+# @hostdevName: The name of host's device
+#
+# Returns:
+# The VM definition, as updated
+#
+# Since: 4.14.0
+##
+{'command': {'class': 'VM', 'name': 'hotplugHostdev'},
+ 'data': {'vmID': 'UUID', 'hostdevName': 'str'},
+ 'returns': 'VmDefinition'}
+
+##
+# @VM.hotunplugHostdev:
+#
+# Remove a host device from a running VM.
+#
+# @vmID: The UUID of the VM
+#
+# @hostdevName: The name of host's device
+#
+# Returns:
+# The VM definition, as updated
+#
+# Since: 4.14.0
+##
+{'command': {'class': 'VM', 'name': 'hotunplugHostdev'},
+ 'data': {'vmID': 'UUID', 'hostdevName': 'str'},
+ 'returns': 'VmDefinition'}
+
+##
# @MigrateMode:
#
# An enumeration of VM migration modes.
--
To view, visit http://gerrit.ovirt.org/22523
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I8fbf4a1d62789d9404e5977eb7eb01b17a1a43fb
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Martin Polednik <mpoledni(a)redhat.com>
Change in vdsm[master]: image: unify the prezeroing optimizations
by Federico Simoncelli
Federico Simoncelli has uploaded a new change for review.
Change subject: image: unify the prezeroing optimizations
......................................................................
image: unify the prezeroing optimizations
The same prezeroing optimization logic was used in multiple places; this
patch unifies it in __optimizedCreateVolume.
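One detail worth calling out for reviewers: extend() apparently takes a
size in 512-byte sectors while the volume parameters are in bytes
(getVolumeParams(bs=1)), so the unified helper rounds up. A quick worked
example of the rounding expression (Python 2 integer division):

    # rounds a byte count up to whole 512-byte sectors
    assert (1 + 511) / 512 == 1      # 1 byte           -> 1 sector
    assert (512 + 511) / 512 == 1    # exactly 1 sector -> 1 sector
    assert (513 + 511) / 512 == 2    # just over        -> 2 sectors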
Change-Id: I0fd90f85e9debf98bcac07d1b8d4b38c319c33f2
Signed-off-by: Federico Simoncelli <fsimonce(a)redhat.com>
---
M vdsm/storage/image.py
1 file changed, 43 insertions(+), 45 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/04/8504/1
diff --git a/vdsm/storage/image.py b/vdsm/storage/image.py
index e86d94c..19ab078 100644
--- a/vdsm/storage/image.py
+++ b/vdsm/storage/image.py
@@ -454,6 +454,37 @@
except Exception:
self.log.error("Unexpected error", exc_info=True)
+ def __optimizedCreateVolume(self, domain, imgUUID, size, apparentSize,
+ volFormat, preallocate, diskType, volUUID, desc, srcImgUUID,
+ srcVolUUID):
+ # To avoid 'prezeroing' preallocated volume on NFS domain,
+ # we create the target volume with minimal size and after
+ # that we'll change its metadata back to the original size.
+ if (volFormat == volume.COW_FORMAT
+ or preallocate == volume.SPARSE_VOL):
+ volTmpSize = size
+ else:
+ volTmpSize = TEMPORARY_VOLUME_SIZE
+
+ domain.createVolume(imgUUID, volTmpSize, volFormat, preallocate,
+ diskType, volUUID, desc, srcImgUUID, srcVolUUID)
+ newVolume = domain.produceVolume(imgUUID, volUUID)
+
+ if volFormat == volume.RAW_FORMAT:
+ extendSize = size
+ else:
+ extendSize = apparentSize
+
+ # Extend volume (for LV only) size to the actual size
+ newVolume.extend((extendSize + 511) / 512)
+
+ # Change destination volume metadata back to the original
+ # size. Heavy operation, do it only if necessary.
+ if volTmpSize != size:
+ newVolume.setSize(size)
+
+ return newVolume
+
def _createTargetImage(self, destDom, srcSdUUID, imgUUID):
# Before actual data copying we need perform several operation
# such as: create all volumes, create fake template if needed, ...
@@ -500,34 +531,12 @@
# find out src volume parameters
volParams = srcVol.getVolumeParams(bs=1)
- # To avoid 'prezeroing' preallocated volume on NFS domain,
- # we create the target volume with minimal size and after
- # that w'll change its metadata back to the original size.
- if (volParams['volFormat'] == volume.COW_FORMAT
- or volParams['prealloc'] == volume.SPARSE_VOL):
- volTmpSize = volParams['size']
- else:
- volTmpSize = TEMPORARY_VOLUME_SIZE # in sectors (10M)
-
- destDom.createVolume(imgUUID=imgUUID, size=volTmpSize,
- volFormat=volParams['volFormat'],
- preallocate=volParams['prealloc'],
- diskType=volParams['disktype'],
- volUUID=srcVol.volUUID,
- desc=volParams['descr'],
- srcImgUUID=pimg,
- srcVolUUID=volParams['parent'])
-
- dstVol = destDom.produceVolume(imgUUID=imgUUID,
- volUUID=srcVol.volUUID)
-
- # Extend volume (for LV only) size to the actual size
- dstVol.extend((volParams['apparentsize'] + 511) / 512)
-
- # Change destination volume metadata back to the original
- # size.
- if volTmpSize != volParams['size']:
- dstVol.setSize(volParams['size'])
+ dstVol = self.__optimizedCreateVolume(
+ destDom, imgUUID, volParams['size'],
+ volParams['apparentsize'], volParams['volFormat'],
+ volParams['prealloc'], volParams['disktype'],
+ srcVol.volUUID, volParams['descr'], srcImgUUID=pimg,
+ srcVolUUID=volParams['parent'])
dstChain.append(dstVol)
except se.StorageException:
@@ -760,25 +769,14 @@
self.log.info("delete image %s on domain %s before overwriting", dstImgUUID, dstSdUUID)
self.delete(dstSdUUID, dstImgUUID, postZero, force=True)
- # To avoid 'prezeroing' preallocated volume on NFS domain,
- # we create the target volume with minimal size and after that w'll change
- # its metadata back to the original size.
- tmpSize = TEMPORARY_VOLUME_SIZE # in sectors (10M)
- destDom.createVolume(imgUUID=dstImgUUID, size=tmpSize,
- volFormat=dstVolFormat, preallocate=volParams['prealloc'],
- diskType=volParams['disktype'], volUUID=dstVolUUID, desc=descr,
- srcImgUUID=volume.BLANK_UUID, srcVolUUID=volume.BLANK_UUID)
+ dstVol = self.__optimizedCreateVolume(
+ destDom, dstImgUUID, volParams['size'],
+ volParams['apparentsize'], dstVolFormat,
+ volParams['prealloc'], volParams['disktype'],
+ dstVolUUID, descr, volume.BLANK_UUID,
+ volume.BLANK_UUID)
- dstVol = sdCache.produce(dstSdUUID).produceVolume(imgUUID=dstImgUUID, volUUID=dstVolUUID)
- # For convert to 'raw' we need use the virtual disk size instead of apparent size
- if dstVolFormat == volume.RAW_FORMAT:
- newsize = volParams['size']
- else:
- newsize = volParams['apparentsize']
- dstVol.extend(newsize)
dstPath = dstVol.getVolumePath()
- # Change destination volume metadata back to the original size.
- dstVol.setSize(volParams['size'])
except se.StorageException, e:
self.log.error("Unexpected error", exc_info=True)
raise
--
To view, visit http://gerrit.ovirt.org/8504
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I0fd90f85e9debf98bcac07d1b8d4b38c319c33f2
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Federico Simoncelli <fsimonce(a)redhat.com>
Change in vdsm[master]: [wip] sdcache: avoid extra refresh due to samplingmethod
by Federico Simoncelli
Federico Simoncelli has uploaded a new change for review.
Change subject: [wip] sdcache: avoid extra refresh due to samplingmethod
......................................................................
[wip] sdcache: avoid extra refresh due to samplingmethod
In order to avoid an extra iscsi rescan (a symptom of samplingmethod),
an additional lock has been introduced to queue the requests when the
storage is flagged as stale.
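For context, a condensed standalone sketch of the locking pattern (not
vdsm code; refreshStorage below stands in for the @misc.samplingmethod
decorated method, which serializes and groups concurrent callers):

    import threading

    class Cache(object):
        def __init__(self):
            self._syncRefresh = threading.Lock()
            self._stale = True

        def invalidateStorage(self):
            self._stale = True

        def refreshStorage(self):
            # the flag is cleared *before* the rescan so an invalidation
            # arriving during the rescan is not lost
            self._stale = False
            # ... multipath.rescan(), lvm.invalidateCache() ...

        def produce(self):
            # the extra lock makes check-and-refresh atomic, so
            # concurrent produce() calls cannot queue a redundant rescan
            with self._syncRefresh:
                if self._stale:
                    self.refreshStorage()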
Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=870768
Change-Id: If178a8eaeb94f1dfe9e0957036dde88f6a22829c
Signed-off-by: Federico Simoncelli <fsimonce(a)redhat.com>
---
M vdsm/storage/sdc.py
1 file changed, 25 insertions(+), 26 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/74/9274/1
diff --git a/vdsm/storage/sdc.py b/vdsm/storage/sdc.py
index f2f4534..978e3fa 100644
--- a/vdsm/storage/sdc.py
+++ b/vdsm/storage/sdc.py
@@ -62,32 +62,27 @@
STORAGE_UPDATED = 0
STORAGE_STALE = 1
- STORAGE_REFRESHING = 2
def __init__(self, storage_repo):
- self._syncroot = threading.Condition()
+ self._syncDomain = threading.Condition()
+ self._syncRefresh = threading.Lock()
self.__domainCache = {}
self.__inProgress = set()
self.__staleStatus = self.STORAGE_STALE
self.storage_repo = storage_repo
def invalidateStorage(self):
- with self._syncroot:
- self.__staleStatus = self.STORAGE_STALE
+ self.log.debug("The storages have been invalidated")
+ self.__staleStatus = self.STORAGE_STALE
@misc.samplingmethod
def refreshStorage(self):
- self.__staleStatus = self.STORAGE_REFRESHING
-
+ # We need to set the __staleStatus value at the beginning because we
+ # want to keep track of the future invalidateStorage calls that might
+ # arrive during the rescan procedure.
+ self.__staleStatus = self.STORAGE_UPDATED
multipath.rescan()
lvm.invalidateCache()
-
- # If a new invalidateStorage request came in after the refresh
- # started then we cannot flag the storages as updated (force a
- # new rescan later).
- with self._syncroot:
- if self.__staleStatus == self.STORAGE_REFRESHING:
- self.__staleStatus = self.STORAGE_UPDATED
def produce(self, sdUUID):
domain = DomainProxy(self, sdUUID)
@@ -98,7 +93,7 @@
return domain
def _realProduce(self, sdUUID):
- with self._syncroot:
+ with self._syncDomain:
while True:
domain = self.__domainCache.get(sdUUID)
@@ -109,25 +104,29 @@
self.__inProgress.add(sdUUID)
break
- self._syncroot.wait()
+ self._syncDomain.wait()
try:
- # If multiple calls reach this point and the storage is not
- # updated the refreshStorage() sampling method is called
- # serializing (and eventually grouping) the requests.
- if self.__staleStatus != self.STORAGE_UPDATED:
- self.refreshStorage()
+ # Here we cannot take full advantage of the refreshStorage
+ # samplingmethod since we might be scheduling an unneeded
+ # extra rescan. We need an additional lock (_syncRefresh)
+ # to make sure that __staleStatus is taken in account
+ # (without affecting all the other external refreshStorage
+ # calls as it would be if we move this check there).
+ with self._syncRefresh:
+ if self.__staleStatus != self.STORAGE_UPDATED:
+ self.refreshStorage()
domain = self._findDomain(sdUUID)
- with self._syncroot:
+ with self._syncDomain:
self.__domainCache[sdUUID] = domain
return domain
finally:
- with self._syncroot:
+ with self._syncDomain:
self.__inProgress.remove(sdUUID)
- self._syncroot.notifyAll()
+ self._syncDomain.notifyAll()
def _findDomain(self, sdUUID):
import blockSD
@@ -162,16 +161,16 @@
return uuids
def refresh(self):
- with self._syncroot:
+ with self._syncDomain:
lvm.invalidateCache()
self.__domainCache.clear()
def manuallyAddDomain(self, domain):
- with self._syncroot:
+ with self._syncDomain:
self.__domainCache[domain.sdUUID] = domain
def manuallyRemoveDomain(self, sdUUID):
- with self._syncroot:
+ with self._syncDomain:
try:
del self.__domainCache[sdUUID]
except KeyError:
--
To view, visit http://gerrit.ovirt.org/9274
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: If178a8eaeb94f1dfe9e0957036dde88f6a22829c
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Federico Simoncelli <fsimonce(a)redhat.com>
Change in vdsm[master]: [WIP] BZ#844656 Release the lock during _findDomain
by Federico Simoncelli
Federico Simoncelli has uploaded a new change for review.
Change subject: [WIP] BZ#844656 Release the lock during _findDomain
......................................................................
[WIP] BZ#844656 Release the lock during _findDomain
Signed-off-by: Federico Simoncelli <fsimonce(a)redhat.com>
Change-Id: I8088d5fe716a3a08c3e5cef2d2d9a654ee96f60a
---
M vdsm/storage/sdc.py
1 file changed, 21 insertions(+), 7 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/22/6822/1
--
To view, visit http://gerrit.ovirt.org/6822
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I8088d5fe716a3a08c3e5cef2d2d9a654ee96f60a
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Federico Simoncelli <fsimonce(a)redhat.com>
Change in vdsm[master]: get the status of core dump
by shaohef@linux.vnet.ibm.com
ShaoHe Feng has uploaded a new change for review.
Change subject: get the status of core dump
......................................................................
get the status of core dump
Change-Id: I5d552db4dbd88762950ec5a113a25c13b73319c8
Signed-off-by: ShaoHe Feng <shaohef(a)linux.vnet.ibm.com>
---
M vdsm/API.py
M vdsm/BindingXMLRPC.py
M vdsm/vm.py
M vdsm_api/vdsmapi-schema.json
M vdsm_cli/vdsClient.py
5 files changed, 36 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/31/11131/1
diff --git a/vdsm/API.py b/vdsm/API.py
index c5f7d40..6b4663a 100644
--- a/vdsm/API.py
+++ b/vdsm/API.py
@@ -302,6 +302,15 @@
return errCode['noVM']
return v.dumpCancel()
+ def dumpStatus(self):
+ """
Report the status of a currently ongoing core dump process.
+ """
+ v = self._cif.vmContainer.get(self._UUID)
+ if not v:
+ return errCode['noVM']
+ return v.dumpStatus()
+
def desktopLock(self):
"""
Lock user session in guest operating system using guest agent.
diff --git a/vdsm/BindingXMLRPC.py b/vdsm/BindingXMLRPC.py
index 17d97b1..b1f22fd 100644
--- a/vdsm/BindingXMLRPC.py
+++ b/vdsm/BindingXMLRPC.py
@@ -219,6 +219,10 @@
vm = API.VM(vmId)
return vm.dumpCancel()
+ def vmCoreDumpStatus(self, vmId):
+ vm = API.VM(vmId)
+ return vm.dumpStatus()
+
def vmReset(self, vmId):
vm = API.VM(vmId)
return vm.reset()
@@ -769,6 +773,7 @@
(self.vmCont, 'cont'),
(self.vmCoreDump, 'coreDump'),
(self.vmCoreDumpCancel, 'dumpCancel'),
+ (self.vmCoreDumpStatus, 'dumpStatus'),
(self.vmSnapshot, 'snapshot'),
(self.vmMerge, 'merge'),
(self.vmMergeStatus, 'mergeStatus'),
diff --git a/vdsm/vm.py b/vdsm/vm.py
index 0a40e97..5d9c0d9 100644
--- a/vdsm/vm.py
+++ b/vdsm/vm.py
@@ -1371,3 +1371,6 @@
return reportError(msg=e.message)
finally:
self._guestCpuLock.release()
+
+ def dumpStatus(self):
+ return self._doCoredumpThread.getStat()
diff --git a/vdsm_api/vdsmapi-schema.json b/vdsm_api/vdsmapi-schema.json
index 39d1cba..e96f01f 100644
--- a/vdsm_api/vdsmapi-schema.json
+++ b/vdsm_api/vdsmapi-schema.json
@@ -5484,6 +5484,16 @@
{'command': {'class': 'VM', 'name': 'dumpCancel'}}
##
+# @VM.dumpStatus:
+#
+# Reports the state of the currently running core dump process
+#
+# Since: 4.10.4
+#
+##
+{'command': {'class': 'VM', 'name': 'dumpStatus'}}
+
+##
# @VM.monitorCommand:
#
# Send a command to the qemu monitor.
diff --git a/vdsm_cli/vdsClient.py b/vdsm_cli/vdsClient.py
index 32ad348..7edc674 100644
--- a/vdsm_cli/vdsClient.py
+++ b/vdsm_cli/vdsClient.py
@@ -1674,6 +1674,11 @@
response = self.s.dumpCancel(vmId)
return response['status']['code'], response['status']['message']
+ def do_dumpStat(self, args):
+ vmId = args[0]
+ response = self.s.dumpStatus(vmId)
+ return response['status']['code'], response['status']['message']
+
def coreDump(self, args):
dumpParams = {'crash': False,
'live': False,
@@ -2422,6 +2427,10 @@
('<vmId>',
'cancel machine core dump'
)),
+ 'coreDumpStatus': (serv.do_dumpStat,
+ ('<vmId>',
+ 'Check the progress of current core dump'
+ )),
'coreDump': (serv.coreDump,
('<vmId> <file> [live=<True|False>] '
'[crash=<True|False>] [bypass-cache=<True|False>] '
--
To view, visit http://gerrit.ovirt.org/11131
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I5d552db4dbd88762950ec5a113a25c13b73319c8
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: ShaoHe Feng <shaohef(a)linux.vnet.ibm.com>
Change in vdsm[master]: dump the core of a domain
by shaohef@linux.vnet.ibm.com
ShaoHe Feng has uploaded a new change for review.
Change subject: dump the core of a domain
......................................................................
dump the core of a domain
libvirt supports an API to dump the core of a domain to a given file for
analysis when the guest OS crashes.
There are two kinds of dump files: one is a QEMU suspend-to-disk image,
the other is a core file which is like a kdump file but also contains
register values.
It's helpful for VDSM to support this to find the root cause if a guest
hangs and kdump isn't set up in it. This would be a good RAS feature.
Here's the definition of the new API:
coreDump:
This method will dump the core of a domain to a given file for
analysis.
Input parameters:
vmId - VM UUID
to - the core file named by the user
flags - defined in libvirt.py
VIR_DUMP_CRASH
VIR_DUMP_LIVE
VIR_DUMP_BYPASS_CACHE
VIR_DUMP_RESET
VIR_DUMP_MEMORY_ONLY
Return value:
success: return doneCode
failure: return errCode including underlying libvirt error message.
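A note on combining flags: the values are bit flags matching libvirt's
VIR_DUMP_* constants (1<<0 through 1<<4, as in the vdsClient table added
below), so e.g. a live, memory-only dump is requested as:

    import libvirt
    # 2 | 16 == 18; the same value vdsClient computes from
    # "live=True memory-only=True"
    flags = libvirt.VIR_DUMP_LIVE | libvirt.VIR_DUMP_MEMORY_ONLY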
Change-Id: If4aac9e747dc7aa64a6ff5ef256a7a4375aa2bb5
Signed-off-by: ShaoHe Feng <shaohef(a)linux.vnet.ibm.com>
---
M vdsm/API.py
M vdsm/BindingXMLRPC.py
M vdsm/define.py
M vdsm/libvirtvm.py
M vdsm_cli/vdsClient.py
5 files changed, 80 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/29/7329/1
diff --git a/vdsm/API.py b/vdsm/API.py
index 19cbb42..e2b24cb 100644
--- a/vdsm/API.py
+++ b/vdsm/API.py
@@ -244,6 +244,12 @@
self.log.debug("Error creating VM", exc_info=True)
return errCode['unexpected']
+ def coreDump(self, to, flags):
+ v = self._cif.vmContainer.get(self._UUID)
+ if not v:
+ return errCode['noVM']
+ return v.coreDump(to, flags)
+
def desktopLock(self):
"""
Lock user session in guest operating system using guest agent.
diff --git a/vdsm/BindingXMLRPC.py b/vdsm/BindingXMLRPC.py
index cc5300f..be71e6a 100644
--- a/vdsm/BindingXMLRPC.py
+++ b/vdsm/BindingXMLRPC.py
@@ -208,6 +208,10 @@
vm = API.VM(vmId)
return vm.cont()
+ def vmCoreDump(self, vmId, to, flags):
+ vm = API.VM(vmId)
+ return vm.coreDump(to, flags)
+
def vmReset(self, vmId):
vm = API.VM(vmId)
return vm.reset()
@@ -725,6 +729,7 @@
(self.getVMList, 'list'),
(self.vmPause, 'pause'),
(self.vmCont, 'cont'),
+ (self.vmCoreDump, 'coreDump'),
(self.vmSnapshot, 'snapshot'),
(self.vmMerge, 'merge'),
(self.vmMergeStatus, 'mergeStatus'),
diff --git a/vdsm/define.py b/vdsm/define.py
index 31deb4f..1fedac5 100644
--- a/vdsm/define.py
+++ b/vdsm/define.py
@@ -114,6 +114,10 @@
'mergeErr': {'status':
{'code': 52,
'message': 'Merge failed'}},
+ 'coreDumpErr': {'status':
+ {'code': 54,
+ 'message':
+ 'Failed to get coreDump file'}},
'recovery': {'status':
{'code': 99,
'message':
diff --git a/vdsm/libvirtvm.py b/vdsm/libvirtvm.py
index 4554fee..cbd9f96 100644
--- a/vdsm/libvirtvm.py
+++ b/vdsm/libvirtvm.py
@@ -1904,6 +1904,27 @@
self.saveState()
+ def coreDump(self, to, flags):
+
+ def reportError(key='coreDumpErr', msg=None):
+ self.log.error("get coreDump failed", exc_info=True)
+ if msg is None:
+ error = errCode[key]
+ else:
+ error = {'status': {'code': errCode[key]['status']['code'],
+ 'message': msg}}
+ return error
+
+ if self._dom is None:
+ return reportError()
+ try:
+ self._dom.coreDump(to, flags)
+ except libvirt.libvirtError, e:
+ if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
+ return reportError(key='noVM')
+ return reportError(msg=e.message)
+ return {'status': doneCode}
+
def changeCD(self, drivespec):
return self._changeBlockDev('cdrom', 'hdc', drivespec)
diff --git a/vdsm_cli/vdsClient.py b/vdsm_cli/vdsClient.py
index eeb7c95..cdcd3a8 100644
--- a/vdsm_cli/vdsClient.py
+++ b/vdsm_cli/vdsClient.py
@@ -1589,6 +1589,33 @@
return status['status']['code'], status['status']['message']
+ def coreDump(self, args):
+ DUMPFLAGS = {'crash': 1 << 0,
+ 'live': 1 << 1,
+ 'bypass-cache': 1 << 2,
+ 'reset': 1 << 3,
+ 'memory-only': 1 << 4}
+ flags = 0
+ vmId = args[0]
+ coreFile = args[1]
+ params = {}
+ if len(args) > 2:
+ for arg in args[2:]:
+ kv = arg.split('=', 1)
+ if len(kv) < 2:
+ params[kv[0]] = "True"
+ else:
+ params[kv[0]] = kv[1]
+ for k, v in params.items():
+ if v.lower() == "true" or not v:
+ try:
+ flags = flags + DUMPFLAGS[k]
+ except KeyError:
+ print "unrecognized optoin %s for cormDump command" % k
+ response = self.s.coreDump(vmId, coreFile, flags)
+ return response['status']['code'], response['status']['message']
+
+
if __name__ == '__main__':
if _glusterEnabled:
serv = ge.GlusterService()
@@ -2239,6 +2266,23 @@
('<vmId> <sdUUID> <imgUUID> <baseVolUUID> <volUUID>',
"Take a live snapshot"
)),
+ 'coreDump': (serv.coreDump,
+ ('<vmId> <file> [live=<True>] '
+ '[crash=<True>] [bypass-cache=<True>] '
+ '[reset=<True>] [memory-only=<True>]',
+ "get memeory dump or migration file"
+ 'optional params:',
+ 'crash: crash the domain after core dump'
+ 'default False',
+ 'live: perform a live core dump if supported, '
+ 'default False',
+ 'bypass-cache: avoid file system cache when saving'
+ 'default False',
+ 'reset: reset the domain after core dump'
+ 'default False',
+ "memory-only: dump domain's memory only"
+ 'default False'
+ )),
}
if _glusterEnabled:
commands.update(ge.getGlusterCmdDict(serv))
--
To view, visit http://gerrit.ovirt.org/7329
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: If4aac9e747dc7aa64a6ff5ef256a7a4375aa2bb5
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: ShaoHe Feng <shaohef(a)linux.vnet.ibm.com>
Change in vdsm[master]: cancel the core dump of a VM
by shaohef@linux.vnet.ibm.com
ShaoHe Feng has uploaded a new change for review.
Change subject: cancel the core dump of a VM
......................................................................
cancel the core dump of a VM
Change-Id: I2fa9e82cfbd43c9edb98fac9af41eb0deb0c67ad
Signed-off-by: ShaoHe Feng <shaohef(a)linux.vnet.ibm.com>
---
M vdsm/API.py
M vdsm/BindingXMLRPC.py
M vdsm/define.py
M vdsm/vm.py
M vdsm_api/vdsmapi-schema.json
M vdsm_cli/vdsClient.py
6 files changed, 62 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/30/11130/1
diff --git a/vdsm/API.py b/vdsm/API.py
index 4f5eed8..c5f7d40 100644
--- a/vdsm/API.py
+++ b/vdsm/API.py
@@ -293,6 +293,15 @@
return errCode['noVM']
return v.coreDump(to, dumpParams)
+ def dumpCancel(self):
+ """
+ Cancel a currently ongoing core dump process.
+ """
+ v = self._cif.vmContainer.get(self._UUID)
+ if not v:
+ return errCode['noVM']
+ return v.dumpCancel()
+
def desktopLock(self):
"""
Lock user session in guest operating system using guest agent.
diff --git a/vdsm/BindingXMLRPC.py b/vdsm/BindingXMLRPC.py
index 9fcbefd..17d97b1 100644
--- a/vdsm/BindingXMLRPC.py
+++ b/vdsm/BindingXMLRPC.py
@@ -215,6 +215,10 @@
vm = API.VM(vmId)
return vm.coreDump(to, params)
+ def vmCoreDumpCancel(self, vmId):
+ vm = API.VM(vmId)
+ return vm.dumpCancel()
+
def vmReset(self, vmId):
vm = API.VM(vmId)
return vm.reset()
@@ -764,6 +768,7 @@
(self.vmPause, 'pause'),
(self.vmCont, 'cont'),
(self.vmCoreDump, 'coreDump'),
+ (self.vmCoreDumpCancel, 'dumpCancel'),
(self.vmSnapshot, 'snapshot'),
(self.vmMerge, 'merge'),
(self.vmMergeStatus, 'mergeStatus'),
diff --git a/vdsm/define.py b/vdsm/define.py
index 84aacad..e1d428c 100644
--- a/vdsm/define.py
+++ b/vdsm/define.py
@@ -134,6 +134,9 @@
{'code': 58,
'message':
'Failed to generate coreDump file'}},
+ 'dumpCancelErr': {'status':
+ {'code': 59,
+ 'message': 'Failed to cancel dump'}},
'recovery': {'status':
{'code': 99,
'message':
diff --git a/vdsm/vm.py b/vdsm/vm.py
index be947c6..0a40e97 100644
--- a/vdsm/vm.py
+++ b/vdsm/vm.py
@@ -1345,3 +1345,29 @@
return check
finally:
self._guestCpuLock.release()
+
+ def dumpCancel(self):
+ def reportError(key='dumpCancelErr', msg=None):
+ if msg is None:
+ error = errCode[key]
+ else:
+ error = {'status':
+ {'code': errCode[key]['status']['code'],
+ 'message': msg}}
+ self.log.error("Failed to cancel core dump. " + msg,
+ exc_info=True)
+ return error
+
+ self._acquireCpuLockWithTimeout()
+ try:
+ if not self.isDoingDump():
+ return reportError(msg='no core dump in process')
+ if self.dumpMode() == "memory":
+ return reportError(msg='invalid to cancel memory dump')
+ self._doCoredumpThread.stop()
+ return {'status': {'code': 0,
+ 'message': 'core dump process stopped'}}
+ except Exception, e:
+ return reportError(msg=e.message)
+ finally:
+ self._guestCpuLock.release()
diff --git a/vdsm_api/vdsmapi-schema.json b/vdsm_api/vdsmapi-schema.json
index 63b0fb1..39d1cba 100644
--- a/vdsm_api/vdsmapi-schema.json
+++ b/vdsm_api/vdsmapi-schema.json
@@ -5474,6 +5474,16 @@
'data': {'to': 'str', 'params': 'DumpParams'}}
##
+# @VM.dumpCancel:
+#
+# Cancel the currently ongoing core dump process.
+#
+# Since: 4.10.4
+#
+##
+{'command': {'class': 'VM', 'name': 'dumpCancel'}}
+
+##
# @VM.monitorCommand:
#
# Send a command to the qemu monitor.
diff --git a/vdsm_cli/vdsClient.py b/vdsm_cli/vdsClient.py
index c4171d9..32ad348 100644
--- a/vdsm_cli/vdsClient.py
+++ b/vdsm_cli/vdsClient.py
@@ -1669,6 +1669,11 @@
return status['status']['code'], status['status']['message']
+ def do_dumpCancel(self, args):
+ vmId = args[0]
+ response = self.s.dumpCancel(vmId)
+ return response['status']['code'], response['status']['message']
+
def coreDump(self, args):
dumpParams = {'crash': False,
'live': False,
@@ -2413,6 +2418,10 @@
'Start live replication to the destination '
'domain'
)),
+ 'coreDumpCancel': (serv.do_dumpCancel,
+ ('<vmId>',
+ 'cancel machine core dump'
+ )),
'coreDump': (serv.coreDump,
('<vmId> <file> [live=<True|False>] '
'[crash=<True|False>] [bypass-cache=<True|False>] '
--
To view, visit http://gerrit.ovirt.org/11130
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I2fa9e82cfbd43c9edb98fac9af41eb0deb0c67ad
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: ShaoHe Feng <shaohef(a)linux.vnet.ibm.com>
Change in vdsm[master]: [WIP] Added glusterVolumeTop verb
by tjeyasin@redhat.com
Hello Ayal Baron, Bala.FA, Saggi Mizrahi, Federico Simoncelli, Dan Kenigsberg,
I'd like you to do a code review. Please visit
http://gerrit.ovirt.org/7844
to review the following change.
Change subject: [WIP] Added glusterVolumeTop verb
......................................................................
[WIP] Added glusterVolumeTop verb
Added glusterVolumeTopOpen verb
Added glusterVolumeTopRead verb
Added glusterVolumeTopWrite verb
Added glusterVolumeTopOpenDir verb
Added glusterVolumeTopReadDir verb
Added glusterVolumeTopReadPerf verb
Added glusterVolumeTopWritePerf verb
Following is the output structure of glusterVolumeTopOpen
{'statusCode' : CODE,
'brickCount': BRICK-COUNT,
'bricks': {BRICK-NAME: {'count':FILE-COUNT,
'currentOpenFds': CURRENT-OPEN-FDS-COUNT,
'maxOpen': MAX-OPEN,
'maxOpenTime': MAX-OPEN-TIME,
'files': [{FILE-NAME: FILE-OPEN-COUNT}, ...]
}, ...} }
Following is the output structure of glusterVolumeTopRead
{'statusCode': CODE,
'brickCount': BRICK-COUNT,
'topOp': TOP-OP,
'bricks': {BRICK-NAME: {
'count': FILE-COUNT,
'files': [{FILE-NAME: FILE-READ-COUNT}, ...]}
,...}}
Following is the output structure glusterVolumeTopWrite
{'statusCode' : CODE,
'brickCount': BRICK-COUNT,
'topOp': TOP-OP,
'bricks': {BRICK-NAME: {'count': FILE-COUNT,
'files': [{FILE-NAME: FILE-WRITE-COUNT}...]}
,...}}
Following is the output structure glusterVolumeTopOpenDir
{'statusCode': CODE,
'brickCount': BRICK-COUNT,
'topOp': TOP-OP,
'bricks': {BRICK-NAME: {'count':OPEN-DIR-COUNT,
'files': [{DIR-NAME: DIR-OPEN-COUNT}, ...]}
,...}}
Following is the output structure glusterVolumeTopReadDir
{'statusCode': CODE,
'brickCount': BRICK-COUNT,
'topOp': TOP-OP,
'bricks': {BRICK-NAME: {'count':READ-DIR-COUNT,
'files': [{DIR-NAME: DIR-READ-COUNT}, ...]}
,...}}
Following is the output structure glusterVolumeTopReadPerf
{'statusCode': CODE,
'brickCount': BRICK-COUNT,
'topOp': TOP-OP,
'bricks': {BRICK-NAME: {'fileCount':READ-COUNT,
'throughput': BRICK-WISE-READ-THROUGHPUT,
'timeTaken': TIME-TAKEN,
'files': [{FILE-NAME:
{'throughput':FILE-READ-THROUGHPUT,
'time': TIME}}, ...]}
,...}}
Following is the output structure glusterVolumeTopWritePerf
{'statusCode': CODE,
'brickCount': BRICK-COUNT,
'topOp': TOP-OP,
'bricks': {BRICK-NAME: {'fileCount':WRITE-COUNT,
'throughput': BRICK-WISE-WRITE-THROUGHPUT,
'timeTaken': TIME-TAKEN,
'files': [{FILE-NAME:
{'throughput':FILE-WRITE-THROUGHPUT,
'time': TIME}}, ...]}
,...}}
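The parsers added in cli.py consume gluster's --xml output. A synthetic
fragment assembled from the tags _parseGlusterVolumeTop looks up
(illustrative only, not verbatim gluster output):

    <cliOutput>
      <opRet>0</opRet>
      <volTop>
        <topOp>1</topOp>
        <brickCount>1</brickCount>
        <brick>
          <name>server1:/exp1</name>
          <members>2</members>
          <file><filename>/a.img</filename><count>42</count></file>
          <file><filename>/b.img</filename><count>17</count></file>
        </brick>
      </volTop>
    </cliOutput>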
Change-Id: I96486363a9acb7472014a67fcd2d5185d4f3c428
Signed-off-by: Timothy Asir <tjeyasin(a)redhat.com>
---
M vdsm/gluster/api.py
M vdsm/gluster/cli.py
M vdsm/gluster/exception.py
M vdsm_cli/vdsClientGluster.py
4 files changed, 372 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/44/7844/1
diff --git a/vdsm/gluster/api.py b/vdsm/gluster/api.py
index e52430b..3f493e0 100644
--- a/vdsm/gluster/api.py
+++ b/vdsm/gluster/api.py
@@ -241,6 +241,61 @@
status = self.svdsmProxy.glusterVolumeProfileInfo(volumeName)
return {'profileInfo': status}
+ @exportAsVerb
+ def volumeTopOpen(self, volumeName, brickName=None, count=None,
+ options=None):
+ status = self.svdsmProxy.glusterVolumeTopOpen(volumeName,
+ brickName, count)
+ return {'topOpen': status}
+
+ @exportAsVerb
+ def volumeTopRead(self, volumeName, brickName=None, count=None,
+ options=None):
+ status = self.svdsmProxy.glusterVolumeTopRead(volumeName,
+ brickName, count)
+ return {'topRead': status}
+
+ @exportAsVerb
+ def volumeTopWrite(self, volumeName, brickName=None, count=None,
+ options=None):
+ status = self.svdsmProxy.glusterVolumeTopWrite(volumeName,
+ brickName, count)
+ return {'topWrite': status}
+
+ @exportAsVerb
+ def volumeTopOpenDir(self, volumeName, brickName=None, count=None,
+ options=None):
+ status = self.svdsmProxy.glusterVolumeTopOpenDir(volumeName,
+ brickName, count)
+ return {'topOpenDir': status}
+
+ @exportAsVerb
+ def volumeTopReadDir(self, volumeName, brickName=None, count=None,
+ options=None):
+ status = self.svdsmProxy.glusterVolumeTopReadDir(volumeName,
+ brickName, count)
+ return {'topReadDir': status}
+
+ @exportAsVerb
+ def volumeTopReadPerf(self, volumeName, blockSize=None, count=None,
+ brickName=None, listCount=None, options=None):
+ status = self.svdsmProxy.glusterVolumeTopReadPerf(volumeName,
+ blockSize,
+ count,
+ brickName,
+ listCount)
+ return {'topReadPerf': status}
+
+ @exportAsVerb
+ def volumeTopWritePerf(self, volumeName, blockSize=None, count=None,
+ brickName=None, listCount=None, options=None):
+ status = self.svdsmProxy.glusterVolumeTopWritePerf(volumeName,
+ blockSize,
+ count,
+ brickName,
+ listCount)
+ return {'topWritePerf': status}
+
def getGlusterMethods(gluster):
l = []
diff --git a/vdsm/gluster/cli.py b/vdsm/gluster/cli.py
index b91a04f..ba4768c 100644
--- a/vdsm/gluster/cli.py
+++ b/vdsm/gluster/cli.py
@@ -334,6 +334,66 @@
return volumeInfoDict
+def _parseGlusterVolumeTopOpen(tree):
+ bricks = {}
+ for brick in tree.findall('volTop/brick'):
+ fileList = []
+ for file in brick.findall('file'):
+ fileList.append({file.find('filename').text:
+ file.find('count').text})
+ bricks[brick.find('name').text] = {
+ 'count': brick.find('members').text,
+ 'currentOpen': brick.find('currentOpen').text,
+ 'maxOpen': brick.find('maxOpen').text,
+ 'maxOpenTime': brick.find('maxOpenTime').text,
+ 'files': fileList}
+ status = {
+ 'topOp': tree.find('volTop/topOp').text,
+ 'brickCount': tree.find('volTop/brickCount').text,
+ 'statusCode': tree.find('opRet').text,
+ 'bricks': bricks}
+ return status
+
+
+def _parseGlusterVolumeTop(tree):
+ bricks = {}
+ for brick in tree.findall('volTop/brick'):
+ fileList = []
+ for fileTag in brick.findall('file'):
+ fileList.append({fileTag.find('filename').text:
+ fileTag.find('count').text})
+ bricks[brick.find('name').text] = {
+ 'count': brick.find('members').text,
+ 'files': fileList}
+ status = {
+ 'topOp': tree.find('volTop/topOp').text,
+ 'brickCount': tree.find('volTop/brickCount').text,
+ 'statusCode': tree.find('opRet').text,
+ 'bricks': bricks}
+ return status
+
+
+def _parseGlusterVolumeTopPerf(tree):
+ bricks = {}
+ for brick in tree.findall('volTop/brick'):
+ fileList = []
+ for fileTag in brick.findall('file'):
+ fileList.append({fileTag.find('filename').text:
+ {'count': fileTag.find('count').text,
+ 'time': fileTag.find('time').text}})
+ bricks[brick.find('name').text] = {
+ 'count': brick.find('members').text,
+ 'throughput': brick.find('throughput').text,
+ 'timeTaken': brick.find('timeTaken').text,
+ 'files': fileList}
+ status = {
+ 'topOp': tree.find('volTop/topOp').text,
+ 'brickCount': tree.find('volTop/brickCount').text,
+ 'statusCode': tree.find("opRet").text,
+ 'bricks': bricks}
+ return status
+
+
def _parseGlusterVolumeProfileInfo(tree):
bricks = {}
for brick in tree.findall('volProfile/brick'):
@@ -819,3 +879,132 @@
return _parseGlusterVolumeProfileInfo(xmltree)
except:
raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopOpen(volumeName, brickName=None, count=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "open"]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if count:
+ command += ["list-cnt", "%s" % count]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopOpenFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTopOpen(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopRead(volumeName, brickName=None, count=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "read"]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if count:
+ command += ["list-cnt", "%s" % count]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopReadFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTop(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopWrite(volumeName, brickName=None, count=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "write"]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if count:
+ command += ["list-cnt", "%s" % count]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopWriteFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTop(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopOpenDir(volumeName, brickName=None, count=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "opendir"]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if count:
+ command += ["list-cnt", "%s" % count]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopOpenDirFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTop(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopReadDir(volumeName, brickName=None, count=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "readdir"]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if count:
+ command += ["list-cnt", "%s" % count]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopReadDirFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTop(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopReadPerf(volumeName, blockSize=None, count=None,
+ brickName=None, listCount=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "read-perf"]
+ if blockSize:
+ command += ["bs", "%s" % blockSize]
+ if count:
+ command += ["count", "%s" % count]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if listCount:
+ command += ["list-cnt", "%s" % listCount]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopReadPerfFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTopPerf(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopWritePerf(volumeName, blockSize=None, count=None,
+ brickName=None, listCount=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "write-perf"]
+ if blockSize:
+ command += ["bs", "%s" % blockSize]
+ if count:
+ command += ["count", "%s" % count]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if listCount:
+ command += ["list-cnt", "%s" % listCount]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopWritePerfFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTopPerf(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
diff --git a/vdsm/gluster/exception.py b/vdsm/gluster/exception.py
index bc20dd0..b392ec8 100644
--- a/vdsm/gluster/exception.py
+++ b/vdsm/gluster/exception.py
@@ -343,6 +343,41 @@
message = "Volume profile info failed"
+class GlusterVolumeTopOpenFailedException(GlusterVolumeException):
+ code = 4161
+ message = "Volume top open failed"
+
+
+class GlusterVolumeTopReadFailedException(GlusterVolumeException):
+ code = 4162
+ message = "Volume top read failed"
+
+
+class GlusterVolumeTopWriteFailedException(GlusterVolumeException):
+ code = 4163
+ message = "Volume top write failed"
+
+
+class GlusterVolumeTopOpenDirFailedException(GlusterVolumeException):
+ code = 4164
+ message = "Volume top open dir failed"
+
+
+class GlusterVolumeTopReadDirFailedException(GlusterVolumeException):
+ code = 4165
+ message = "Volume top read dir failed"
+
+
+class GlusterVolumeTopReadPerfFailedException(GlusterVolumeException):
+ code = 4166
+ message = "Volume top read perf failed"
+
+
+class GlusterVolumeTopWritePerfFailedException(GlusterVolumeException):
+ code = 4167
+ message = "Volume top write perf failed"
+
+
# Host
class GlusterHostException(GlusterException):
code = 4400
diff --git a/vdsm_cli/vdsClientGluster.py b/vdsm_cli/vdsClientGluster.py
index 8422695..3663c63 100644
--- a/vdsm_cli/vdsClientGluster.py
+++ b/vdsm_cli/vdsClientGluster.py
@@ -221,6 +221,41 @@
pp.pprint(status)
return status['status']['code'], status['status']['message']
+ def do_glusterVolumeTopOpen(self, args):
+ status = self.s.glusterVolumeTopOpen(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeTopRead(self, args):
+ status = self.s.glusterVolumeTopRead(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeTopWrite(self, args):
+ status = self.s.glusterVolumeTopWrite(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeTopOpenDir(self, args):
+ status = self.s.glusterVolumeTopOpenDir(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeTopReadDir(self, args):
+ status = self.s.glusterVolumeTopReadDir(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeTopReadPerf(self, args):
+ status = self.s.glusterVolumeTopReadPerf(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeTopWritePerf(self, args):
+ status = self.s.glusterVolumeTopWritePerf(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
def getGlusterCmdDict(serv):
return {
@@ -403,4 +438,62 @@
('<volume_name>\n\t<volume_name> is existing volume name',
'get gluster volume profile info'
)),
+ 'glusterVolumeTopOpen':
+ (serv.do_glusterVolumeTopOpen,
+ ('<volume_name> [brick=<existing_brick>] '
+ '[count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get volume top open fd count and maximum fd count of '
+ 'a given volume with its all brick or specified brick'
+ )),
+ 'glusterVolumeTopRead':
+ (serv.do_glusterVolumeTopRead,
+ ('<volume_name> [brick=<existing_brick>] '
+ '[count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get list of highest read calls on each brick or '
+ 'a specified brick of a volume'
+ )),
+ 'glusterVolumeTopWrite':
+ (serv.do_glusterVolumeTopWrite,
+ ('<volume_name> [brick=<existing_brick>] '
+ '[count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get list of highest write calls on each brick or '
+ 'a specified brick of a volume'
+ )),
+ 'glusterVolumeTopOpenDir':
+ (serv.do_glusterVolumeTopOpenDir,
+ ('<volume_name> [brick=<existing_brick>] '
+ '[count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get list of highest open calls on directories of each brick '
+ 'or a specified brick of a volume'
+ )),
+ 'glusterVolumeTopReadDir':
+ (serv.do_glusterVolumeTopReadDir,
+ ('<volume_name> [brick=<existing_brick>] '
+ '[count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get list of highest read calls on directories of each brick '
+ 'or a specified brick of a volume'
+ )),
+ 'glusterVolumeTopReadPerf':
+ (serv.do_glusterVolumeTopReadPerf,
+ ('<volume_name> [block_size=<block_size>] '
+ '[count=<count>] [brick=<existing_brick>] '
+ '[list_count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get list of read throughput of files on bricks. '
+ 'if the block size and the count is not specified, '
+ 'it will give the output based on historical data'
+ )),
+ 'glusterVolumeTopWritePerf':
+ (serv.do_glusterVolumeTopWritePerf,
+ ('<volume_name> [block_size=<block_size>] '
+ '[count=<count>] [brick=<existing_brick>] '
+ '[list_count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get list of write throughput of files on bricks'
+ )),
}
--
To view, visit http://gerrit.ovirt.org/7844
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I96486363a9acb7472014a67fcd2d5185d4f3c428
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Timothy Asir <tjeyasin(a)redhat.com>
Gerrit-Reviewer: Ayal Baron <abaron(a)redhat.com>
Gerrit-Reviewer: Bala.FA <barumuga(a)redhat.com>
Gerrit-Reviewer: Dan Kenigsberg <danken(a)redhat.com>
Gerrit-Reviewer: Federico Simoncelli <fsimonce(a)redhat.com>
Gerrit-Reviewer: Saggi Mizrahi <smizrahi(a)redhat.com>