Francesco Romani has uploaded a new change for review.
Change subject: janitorial: vm: switch to response.error() ......................................................................
janitorial: vm: switch to response.error()
response.error() offers a nicer and cleaner way to report error responses than the bare errCode['something'], even when we are fine with the default generic message.
This patch makes use of response.error() all across vm.py.
Change-Id: I3825faf7a144ef8973dee3cb9f9f0e52fabfc039 Signed-off-by: Francesco Romani fromani@redhat.com --- M vdsm/virt/vm.py 1 file changed, 62 insertions(+), 62 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/68/38268/1
diff --git a/vdsm/virt/vm.py b/vdsm/virt/vm.py index e7fbcf2..cd392c7 100644 --- a/vdsm/virt/vm.py +++ b/vdsm/virt/vm.py @@ -1589,7 +1589,7 @@ if self.lastStatus in (vmstatus.MIGRATION_SOURCE, vmstatus.SAVING_STATE, vmstatus.DOWN): self.log.error('cannot cont while %s', self.lastStatus) - return errCode['unexpected'] + return response.error('unexpected') self._underlyingCont() self._setGuestCpuRunning(self._isDomainRunning(), guestCpuLocked=True) @@ -1638,7 +1638,7 @@
def shutdown(self, delay, message, reboot, timeout, force): if self.lastStatus == vmstatus.DOWN: - return errCode['noVM'] + return response.error('noVM')
delay = int(delay)
@@ -1913,13 +1913,13 @@ try: if self.isMigrating(): self.log.warning('vm already migrating') - return errCode['exist'] + return response.error('exist') if self.hasTransientDisks(): - return errCode['transientErr'] + return response.error('transientErr') # while we were blocking, another migrationSourceThread could have # taken self Down if self._lastStatus == vmstatus.DOWN: - return errCode['noVM'] + return response.error('noVM') self._migrationSourceThread = migration.SourceThread( self, **params) self._migrationSourceThread.start() @@ -1940,11 +1940,11 @@ return self._migrationSourceThread.status except libvirt.libvirtError as e: if e.get_error_code() == libvirt.VIR_ERR_OPERATION_INVALID: - return errCode['migCancelErr'] + return response.error('migCancelErr') raise except AttributeError: if self._dom is None: - return errCode['migCancelErr'] + return response.error('migCancelErr') raise finally: self._guestCpuLock.release() @@ -2317,7 +2317,7 @@
def hotplugNic(self, params): if self.isMigrating(): - return errCode['migInProgress'] + return response.error('migInProgress')
nicParams = params['nic'] nic = vmdevices.network.Interface(self.conf, self.log, **nicParams) @@ -2336,7 +2336,7 @@ nicXml = hooks.after_nic_hotplug_fail( nicXml, self.conf, params=nic.custom) if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN: - return errCode['noVM'] + return response.error('noVM') return response.error('hotplugNic', e.message) else: # FIXME! We may have a problem here if vdsm dies right after @@ -2524,7 +2524,7 @@ response['vmList'] = self.status() return response else: - return errCode['updateDevice'] + return response.error('updateDevice')
def updateDevice(self, params): if params.get('deviceType') == hwclass.NIC: @@ -2532,11 +2532,11 @@ elif params.get('deviceType') == hwclass.GRAPHICS: return self._updateGraphicsDevice(params) else: - return errCode['noimpl'] + return response.error('noimpl')
def hotunplugNic(self, params): if self.isMigrating(): - return errCode['migInProgress'] + return response.error('migInProgress')
nicParams = params['nic']
@@ -2583,7 +2583,7 @@ except libvirt.libvirtError as e: self.log.exception("Hotunplug failed") if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN: - return errCode['noVM'] + return response.error('noVM') # Restore NIC device in vm's conf and _devices if nicDev: with self._confLock: @@ -2602,7 +2602,7 @@ def setNumberOfCpus(self, numberOfCpus):
if self.isMigrating(): - return errCode['migInProgress'] + return response.error('migInProgress')
self.log.debug("Setting number of cpus to : %s", numberOfCpus) hooks.before_set_num_of_cpus() @@ -2612,7 +2612,7 @@ except libvirt.libvirtError as e: self.log.exception("setNumberOfCpus failed") if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN: - return errCode['noVM'] + return response.error('noVM') return response.error('setNumberOfCpusErr', e.message)
self.conf['smp'] = str(numberOfCpus) @@ -2646,7 +2646,7 @@ """
if self.isMigrating(): - return errCode['migInProgress'] + return response.error('migInProgress')
if not params: self.log.error("updateVmPolicy got an empty policy.") @@ -2710,7 +2710,7 @@ except libvirt.libvirtError as e: self.log.exception("updateVmPolicy failed") if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN: - return errCode['noVM'] + return response.error('noVM') else: return self._reportError(key='updateVmPolicyErr', msg=e.message) @@ -2797,7 +2797,7 @@ except libvirt.libvirtError as e: self.log.exception("setVmIoTune failed") if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN: - return errCode['noVM'] + return response.error('noVM') else: return self._reportError(key='updateIoTuneErr', msg=e.message) @@ -2860,7 +2860,7 @@
def hotplugDisk(self, params): if self.isMigrating(): - return errCode['migInProgress'] + return response.error('migInProgress')
diskParams = params.get('drive', {}) diskParams['path'] = self.cif.prepareVolumePath(diskParams) @@ -2873,7 +2873,7 @@ drive = vmdevices.storage.Drive(self.conf, self.log, **diskParams)
if drive.hasVolumeLeases: - return errCode['noimpl'] + return response.error('noimpl')
driveXml = drive.getXML().toprettyxml(encoding='utf-8') # TODO: this is debug information. For 3.6.x we still need to @@ -2889,7 +2889,7 @@ self.log.exception("Hotplug failed") self.cif.teardownVolumePath(diskParams) if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN: - return errCode['noVM'] + return response.error('noVM') return response.error('hotplugDisk', e.message) else: # FIXME! We may have a problem here if vdsm dies right after @@ -2909,7 +2909,7 @@
def hotunplugDisk(self, params): if self.isMigrating(): - return errCode['migInProgress'] + return response.error('migInProgress')
diskParams = params.get('drive', {}) diskParams['path'] = self.cif.prepareVolumePath(diskParams) @@ -2922,7 +2922,7 @@ return response.error('hotunplugDisk', "Disk not found")
if drive.hasVolumeLeases: - return errCode['noimpl'] + return response.error('noimpl')
driveXml = drive.getXML().toprettyxml(encoding='utf-8') # TODO: this is debug information. For 3.6.x we still need to @@ -2949,7 +2949,7 @@ except libvirt.libvirtError as e: self.log.exception("Hotunplug failed") if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN: - return errCode['noVM'] + return response.error('noVM') self._devices[hwclass.DISK].append(drive) # Restore disk device in vm's conf and _devices if diskDev: @@ -3239,7 +3239,7 @@ vmDrives = {}
if self.isMigrating(): - return errCode['migInProgress'] + return response.error('migInProgress')
for drive in snapDrives: baseDrv, tgetDrv = _normSnapDriveParams(drive) @@ -3260,15 +3260,15 @@ except LookupError: # The volume we want to snapshot doesn't exist self.log.error("The base volume doesn't exist: %s", baseDrv) - return errCode['snapshotErr'] + return response.error('snapshotErr')
if vmDrive.hasVolumeLeases: self.log.error('disk %s has volume leases', vmDrive.name) - return errCode['noimpl'] + return response.error('noimpl')
if vmDrive.transientDisk: self.log.error('disk %s is a transient disk', vmDrive.name) - return errCode['transientErr'] + return response.error('transientErr')
vmDevName = vmDrive.name
@@ -3300,7 +3300,7 @@ self.log.exception('unable to prepare the volume path for ' 'disk %s', vmDevName) _rollbackDrives(preparedDrives) - return errCode['snapshotErr'] + return response.error('snapshotErr')
snapType = 'block' if vmDrives[vmDevName].blockDev else 'file' snapelem = _diskSnapshot(vmDevName, newDrives[vmDevName]["path"], @@ -3364,7 +3364,7 @@ self.log.exception("Unable to take snapshot") if memoryParams: self.cif.teardownVolumePath(memoryVol) - return errCode['snapshotErr'] + return response.error('snapshotErr')
# We are padding the memory volume with block size of zeroes # because qemu-img truncates files such that their size is @@ -3439,20 +3439,20 @@ srcDrive = self._findDriveByUUIDs(srcDisk) except LookupError: self.log.error("Unable to find the disk for '%s'", srcDisk) - return errCode['imageErr'] + return response.error('imageErr')
if srcDrive.hasVolumeLeases: - return errCode['noimpl'] + return response.error('noimpl')
if srcDrive.transientDisk: - return errCode['transientErr'] + return response.error('transientErr')
try: self._setDiskReplica(srcDrive, dstDisk) except Exception: self.log.error("Unable to set the replication for disk '%s' with " "destination '%s'", srcDrive.name, dstDisk) - return errCode['replicaErr'] + return response.error('replicaErr')
dstDiskCopy = dstDisk.copy()
@@ -3484,7 +3484,7 @@ except Exception: self.log.exception("Cannot complete the disk replication process") self._delDiskReplica(srcDrive) - return errCode['replicaErr'] + return response.error('replicaErr')
if srcDrive.chunked: try: @@ -3502,16 +3502,16 @@ try: srcDrive = self._findDriveByUUIDs(srcDisk) except LookupError: - return errCode['imageErr'] + return response.error('imageErr')
if srcDrive.hasVolumeLeases: - return errCode['noimpl'] + return response.error('noimpl')
if srcDrive.transientDisk: - return errCode['transientErr'] + return response.error('transientErr')
if not srcDrive.isDiskReplicationInProgress(): - return errCode['replicaErr'] + return response.error('replicaErr')
# Looking for the replication blockJob info (checking its presence) blkJobInfo = self._dom.blockJobInfo(srcDrive.name, 0) @@ -3523,12 +3523,12 @@
# Making sure that we don't have any stale information self._delDiskReplica(srcDrive) - return errCode['replicaErr'] + return response.error('replicaErr')
# Checking if we reached the replication mode ("mirroring" in libvirt # and qemu terms) if blkJobInfo['cur'] != blkJobInfo['end']: - return errCode['unavail'] + return response.error('unavail')
dstDiskCopy = dstDisk.copy()
@@ -3567,7 +3567,7 @@ # There is nothing we can do at this point other than logging self.log.exception("Unable to teardown the replication " "destination disk") - return errCode['changeDisk'] # Finally is evaluated + return response.error('changeDisk') # Finally is evaluated else: try: self.cif.teardownVolumePath(diskToTeardown) @@ -3595,7 +3595,7 @@ "Requested extension size %s for disk %s is smaller " "than the current size %s", newSizeBytes, drive.name, currentSize) - return errCode['resizeErr'] + return response.error('resizeErr')
# Uncommit the current volume size (mark as in transaction) self.cif.irs.setVolumeSize(drive.domainID, drive.poolID, @@ -3608,7 +3608,7 @@ self.log.exception( "An error occurred while trying to extend the disk %s " "to size %s", drive.name, newSizeBytes) - return errCode['updateDevice'] + return response.error('updateDevice') finally: # In all cases we want to try and fix the size in the metadata. # Same as above, this is what libvirt would do, see BZ#963881 @@ -3650,7 +3650,7 @@ "Libvirt failed to notify the new size %s to the " "running VM, the change will be available at the ", "reboot", sizeRoundedBytes, exc_info=True) - return errCode['updateDevice'] + return response.error('updateDevice')
return {'status': doneCode, 'size': str(sizeRoundedBytes)}
@@ -3658,12 +3658,12 @@ try: newSizeBytes = int(newSizeBytes) except ValueError: - return errCode['resizeErr'] + return response.error('resizeErr')
try: drive = self._findDriveByUUIDs(driveSpecs) except LookupError: - return errCode['imageErr'] + return response.error('imageErr')
try: if drive.format == "cow": @@ -3673,7 +3673,7 @@ except Exception: self.log.exception("Unable to extend disk %s to size %s", drive.name, newSizeBytes) - return errCode['updateDevice'] + return response.error('updateDevice')
def _onWatchdogEvent(self, action): def actionToString(action): @@ -3714,7 +3714,7 @@ try: path = self.cif.prepareVolumePath(drivespec) except VolumeError: - return errCode['imageErr'] + return response.error('imageErr') diskelem = vmxml.Element('disk', type='file', device=vmDev) diskelem.appendChildWithArgs('source', file=path) diskelem.appendChildWithArgs('target', dev=blockdev) @@ -3725,7 +3725,7 @@ except Exception: self.log.debug("updateDeviceFlags failed", exc_info=True) self.cif.teardownVolumePath(drivespec) - return errCode['changeDisk'] + return response.error('changeDisk') if vmDev in self.conf: self.cif.teardownVolumePath(self.conf[vmDev])
@@ -3902,7 +3902,7 @@ self.conf['vmId'], exc_info=True) if e.get_error_code() == libvirt.VIR_ERR_OPERATION_FAILED: return self._destroyVmForceful() - return errCode['destroyErr'] + return response.error('destroyErr') return {'status': doneCode}
def _destroyVmForceful(self): @@ -3911,7 +3911,7 @@ except libvirt.libvirtError: self.log.warning("Failed to destroy VM '%s'", self.conf['vmId'], exc_info=True) - return errCode['destroyErr'] + return response.error('destroyErr') return {'status': doneCode}
def deleteVm(self): @@ -4673,7 +4673,7 @@ def merge(self, driveSpec, baseVolUUID, topVolUUID, bandwidth, jobUUID): if not caps.getLiveMergeSupport(): self.log.error("Live merge is not supported on this host") - return errCode['mergeErr'] + return response.error('mergeErr')
bandwidth = int(bandwidth) if jobUUID is None: @@ -4682,14 +4682,14 @@ try: drive = self._findDriveByUUIDs(driveSpec) except LookupError: - return errCode['imageErr'] + return response.error('imageErr')
# Check that libvirt exposes full volume chain information chains = self._driveGetActualVolumeChain([drive]) if drive['alias'] not in chains: self.log.error("merge: libvirt does not support volume chain " "monitoring. Unable to perform live merge.") - return errCode['mergeErr'] + return response.error('mergeErr')
base = top = None for v in drive.volumeChain: @@ -4699,10 +4699,10 @@ top = v['path'] if base is None: self.log.error("merge: base volume '%s' not found", baseVolUUID) - return errCode['mergeErr'] + return response.error('mergeErr') if top is None: self.log.error("merge: top volume '%s' not found", topVolUUID) - return errCode['mergeErr'] + return response.error('mergeErr')
# If base is a shared volume then we cannot allow a merge. Otherwise # We'd corrupt the shared volume for other users. @@ -4710,10 +4710,10 @@ drive.imageID, baseVolUUID) if res['status']['code'] != 0: self.log.error("Unable to get volume info for '%s'", baseVolUUID) - return errCode['mergeErr'] + return response.error('mergeErr') if res['info']['voltype'] == 'SHARED': self.log.error("merge: Refusing to merge into a shared volume") - return errCode['mergeErr'] + return response.error('mergeErr')
# Indicate that we expect libvirt to maintain the relative paths of # backing files. This is necessary to ensure that a volume chain is @@ -4738,7 +4738,7 @@ if res['status']['code'] != 0: self.log.error("Unable to get volume info for '%s'", topVolUUID) - return errCode['mergeErr'] + return response.error('mergeErr') topSize = int(res['info']['apparentsize'])
# Take the jobs lock here to protect the new job we are tracking from @@ -4749,7 +4749,7 @@ 'commit') except BlockJobExistsError: self.log.error("A block job is already active on this disk") - return errCode['mergeErr'] + return response.error('mergeErr') self.log.info("Starting merge with jobUUID='%s'", jobUUID)
try: @@ -4760,7 +4760,7 @@ except (RuntimeError, libvirt.libvirtError): self.log.exception("Live merge failed (job: %s)", jobUUID) self.untrackBlockJob(jobUUID) - return errCode['mergeErr'] + return response.error('mergeErr')
# blockCommit will cause data to be written into the base volume. # Perform an initial extension to ensure there is enough space to