Patch was abandoned by Saggi Mizrahi (smizrahi(a)redhat.com) because: Bug is non issue and refactoring can wait
You can review this change at: http://gerrit.usersys.redhat.com/683
New patch submitted by Erez Sh (erez(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/684
commit b4b7c5efc10d4c0702523c69d4ac54ffba2d87b1
Author: Erez Sh <erez(a)redhat.com>
Date: Mon Jul 11 18:30:48 2011 +0300
BZ#720425 - No longer using the return value from wait, because there is None.
Change-Id: I1f6f22fb4e047dc24335eeef7664c33e1076eb02
diff --git a/vdsm/libvirtvm.py b/vdsm/libvirtvm.py
index 0516c3b..89330c4 100644
--- a/vdsm/libvirtvm.py
+++ b/vdsm/libvirtvm.py
@@ -214,7 +214,8 @@ class MigrationMonitorThread(threading.Thread):
def run(self):
self._vm.log.debug('starting migration monitor thread')
- while not self._stop.wait(self._MIGRATION_MONITOR_INTERVAL):
+ while not self._stop.isSet():
+ self._stop.wait(self._MIGRATION_MONITOR_INTERVAL)
jobType, timeElapsed, _, \
dataTotal, dataProcessed, _, \
memTotal, memProcessed, _, \
Erez Sh has posted comments on this change.
Change subject: BZ#720425 - No longer using the return value from wait, because there is None.
......................................................................
Patch Set 1: Verified
Ran migration tests and it worked
--
To view, visit http://gerrit.usersys/684
To unsubscribe, visit http://gerrit.usersys/settings
Gerrit-MessageType: comment
Gerrit-Change-Id: I1f6f22fb4e047dc24335eeef7664c33e1076eb02
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Erez Sh <erez(a)redhat.com>
Gerrit-Reviewer: Dan Kenigsberg <danken(a)redhat.com>
Gerrit-Reviewer: Erez Sh <erez(a)redhat.com>
Gerrit-Reviewer: Saggi Mizrahi <smizrahi(a)redhat.com>
New patch submitted by Igor Lvovsky (ilvovsky(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/697
commit 7c44b0af0303d7a4876091cc5bfc9783dbcb5ecb
Author: Igor Lvovsky <ilvovsky(a)redhat.com>
Date: Wed Jul 13 15:14:19 2011 +0300
BZ#717847 - Postpone VM's periodic tasks until all volumes are prepared
Change-Id: I9424087c3c9ef06a4a88921dc8ca14e890b7a44f
diff --git a/vdsm/libvirtvm.py b/vdsm/libvirtvm.py
index 40567a5..79da4b2 100644
--- a/vdsm/libvirtvm.py
+++ b/vdsm/libvirtvm.py
@@ -55,6 +55,10 @@ class VmStatsThread(utils.AdvancedStatsThread):
if self._vm._incomingMigrationPending():
return
+ if not self._vm._volumesPrepared:
+ # Avoid queries from storage during recovery process
+ return
+
for vmDrive in self._vm._drives:
if vmDrive.blockDev and vmDrive.format == 'cow':
capacity, alloc, physical = \
@@ -66,7 +70,7 @@ class VmStatsThread(utils.AdvancedStatsThread):
self._vm._onHighWrite(vmDrive.name, alloc)
def _updateVolumes(self):
- if not self._vm.cif.irs.getConnectedStoragePoolsList()['poollist']:
+ if not self._vm._volumesPrepared:
# Avoid queries from storage during recovery process
return
@@ -82,6 +86,10 @@ class VmStatsThread(utils.AdvancedStatsThread):
return cpuTime / 1000**3
def _sampleDisk(self):
+ if not self._vm._volumesPrepared:
+ # Avoid queries from storage during recovery process
+ return
+
diskSamples = {}
for vmDrive in self._vm._drives:
diskSamples[vmDrive.name] = self._vm._dom.blockStats(vmDrive.name)
diff --git a/vdsm/vm.py b/vdsm/vm.py
index ecc8e70..5b30b1e 100644
--- a/vdsm/vm.py
+++ b/vdsm/vm.py
@@ -300,6 +300,7 @@ class Vm(object):
self.conf.pop('elapsedTimeOffset', 0))
self._cdromPreparedPath = ''
self._floppyPreparedPath = ''
+ self._volumesPrepared = False
self._pathsPreparedEvent = threading.Event()
self.saveState()
@@ -418,6 +419,8 @@ class Vm(object):
def preparePaths(self):
for drive in self.conf.get('drives', []):
drive['path'] = self._prepareVolumePath(drive)
+ # Now we got all needed locks
+ self._volumesPrepared = True
try:
self._cdromPreparedPath = self._prepareVolumePath(
New patch submitted by Dan Kenigsberg (danken(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/696
commit cfcabd3b7bba2d6513b65216017828f326f2f660
Author: Dan Kenigsberg <danken(a)redhat.com>
Date: Wed Jul 13 14:23:45 2011 +0300
BZ#720919 spmprotect.sh: remove log noise
check_renew may have been called after fence or release have removed
RENEWDIR. We should not print nonsense to the log in that case.
Change-Id: I7ce575b863fcbdf6b0bf35747a23882a53ae1a39
diff --git a/vdsm/storage/protect/spmprotect.sh b/vdsm/storage/protect/spmprotect.sh
index 3548b17..f476446 100755
--- a/vdsm/storage/protect/spmprotect.sh
+++ b/vdsm/storage/protect/spmprotect.sh
@@ -135,7 +135,7 @@ function check_renew() {
res=0
fi
fi
- if pushd "$RENEWDIR" > /dev/null; then
+ if pushd "$RENEWDIR" > /dev/null 2>&1 ; then
rm -f $list
popd > /dev/null
fi
New patch submitted by David Naori (dnaori(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/699
commit 1ce349a644d20dfd3d71f8b8f8eda30fab8108fc
Author: David Naori <dnaori(a)redhat.com>
Date: Wed Jul 13 16:34:04 2011 +0300
libvirtconnection: Add "VIR_FROM_RPC" to trigger prepareForShutdown.
libvirt-0.9.3-2 has changed the behavior of libvirt disconnection;
we should now trigger prepareForShutdown in this case too. (Reference: BZ#681911)
Change-Id: I2a85b01e5d9a32aab2e9399035043d4a08856d67
diff --git a/vdsm/libvirtconnection.py b/vdsm/libvirtconnection.py
index ed1ae2e..48030e2 100644
--- a/vdsm/libvirtconnection.py
+++ b/vdsm/libvirtconnection.py
@@ -68,8 +68,8 @@ def get(cif=None):
setattr(ret, name, wrapMethod(method))
return ret
except libvirt.libvirtError, e:
- if (e.get_error_domain() == libvirt.VIR_FROM_REMOTE and
- e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR):
+ if (e.get_error_domain() in (libvirt.VIR_FROM_REMOTE, libvirt.VIR_FROM_RPC)
+ and e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR):
cif.log.error('connection to libvirt broken. '
'taking vdsm down.')
cif.prepareForShutdown()
New patch submitted by Igor Lvovsky (ilvovsky(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/646
commit e76296ddddfeecb08c56e2a3f1e9960751442b8a
Author: Igor Lvovsky <ilvovsky(a)redhat.com>
Date: Tue Jul 5 16:00:21 2011 +0300
BZ#717952 - Fix race in moveImage with overwrite of VM with several disks on NFS domains
Change-Id: I367e83a700e84be0e4cb0ca5b7a02416cb54ac25
diff --git a/vdsm/storage/image.py b/vdsm/storage/image.py
index 51e237b..4fc6f5b 100644
--- a/vdsm/storage/image.py
+++ b/vdsm/storage/image.py
@@ -375,21 +375,26 @@ class Image:
"""
Relink all hardlinks of the template 'volUUID' in all VMs based on it
"""
- # Avoid relink templates for SAN domains
- if destDom.getStorageType() in [ sd.NFS_DOMAIN ]:
- vol = destDom.produceVolume(imgUUID=imgUUID, volUUID=volUUID)
- chList = vol.getAllChildrenList(self.repoPath, destDom.sdUUID, imgUUID, volUUID)
- for ch in chList:
- # Remove hardlink of this template
- v = destDom.produceVolume(imgUUID=ch['imgUUID'], volUUID=volUUID)
- v.delete(postZero=False, force=True)
-
- # Now we should re-link deleted hardlink, if exists
- newVol = destDom.produceVolume(imgUUID=imgUUID, volUUID=volUUID)
- imageDir = self.getImageDir(destDom.sdUUID, ch['imgUUID'])
- newVol.share(imageDir)
- else:
+ # Avoid relink templates for non-NFS domains
+ if destDom.getStorageType() not in [ sd.NFS_DOMAIN ]:
self.log.debug("Doesn't relink templates non-NFS domain %s", destDom.sdUUID)
+ return
+
+ vol = destDom.produceVolume(imgUUID=imgUUID, volUUID=volUUID)
+ # Relink templates only
+ if not vol.isShared():
+ self.log.debug("Doesn't relink regular volume %s of image %s", volUUID, imgUUID)
+ return
+ chList = vol.getAllChildrenList(self.repoPath, destDom.sdUUID, imgUUID, volUUID)
+ for ch in chList:
+ # Remove hardlink of this template
+ v = destDom.produceVolume(imgUUID=ch['imgUUID'], volUUID=volUUID)
+ v.delete(postZero=False, force=True)
+
+ # Now we should re-link deleted hardlink, if exists
+ newVol = destDom.produceVolume(imgUUID=imgUUID, volUUID=volUUID)
+ imageDir = self.getImageDir(destDom.sdUUID, ch['imgUUID'])
+ newVol.share(imageDir)
def createFakeTemplate(self, sdUUID, volParams):
"""
New patch submitted by David Naori (dnaori(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/687
commit 3a787a84a78f1b9ea202e3c10df2985dc8382d3c
Author: David Naori <dnaori(a)redhat.com>
Date: Tue Jul 12 12:17:56 2011 +0300
BZ#720359: Remove lockfile instead of stop libvirt-guests.
Stopping libvirt-guests via the Sys-V init script tries to access libvirtd,
which requires sasl authentication. Instead, removing the lockfile signifies
to libvirt-guests that it has been stopped.
Change-Id: Id74ce865ad06f7c3de86f937ff17f6f35affa818
diff --git a/vdsm/vdsmd b/vdsm/vdsmd
index 6b35e23..d27fbaf 100755
--- a/vdsm/vdsmd
+++ b/vdsm/vdsmd
@@ -130,7 +130,11 @@ shutdown_conflicting_srv() {
/sbin/chkconfig $srv off
if /sbin/service $srv status > /dev/null 2>&1;
then
- /sbin/service $srv stop
+ if [ "$serv" == "libvirt-guests" ]; then
+ rm -f /var/lock/subsys/libvirt-guests
+ else
+ /sbin/service $srv stop
+ fi
fi
done
return 0
Dan Kenigsberg has submitted this change and it was merged.
Change subject: Related to BZ#719516: Make list nicer and include listNames inside.
......................................................................
Related to BZ#719516: Make list nicer and include listNames inside.
Change-Id: I0753685d3032e79cc3f5de888b8ee7e865954d9c
---
M vdsm_cli/vdsClient.py
1 file changed, 37 insertions(+), 44 deletions(-)
Approvals:
Dan Kenigsberg: Looks good to me, approved
David Naori: Verified
Erez Sh: Looks good to me, but someone else must approve
--
To view, visit http://gerrit.usersys/676
To unsubscribe, visit http://gerrit.usersys/settings
Gerrit-MessageType: merged
Gerrit-Change-Id: I0753685d3032e79cc3f5de888b8ee7e865954d9c
Gerrit-PatchSet: 7
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: David Naori <dnaori(a)redhat.com>
Gerrit-Reviewer: Ayal Baron
Gerrit-Reviewer: Dan Kenigsberg <danken(a)redhat.com>
Gerrit-Reviewer: David Naori <dnaori(a)redhat.com>
Gerrit-Reviewer: Erez Sh <erez(a)redhat.com>
Gerrit-Reviewer: Saggi Mizrahi <smizrahi(a)redhat.com>
Dan Kenigsberg has posted comments on this change.
Change subject: Related to BZ#719516: Make list nicer and include listNames inside.
......................................................................
Patch Set 7: Looks good to me, approved
--
To view, visit http://gerrit.usersys/676
To unsubscribe, visit http://gerrit.usersys/settings
Gerrit-MessageType: comment
Gerrit-Change-Id: I0753685d3032e79cc3f5de888b8ee7e865954d9c
Gerrit-PatchSet: 7
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: David Naori <dnaori(a)redhat.com>
Gerrit-Reviewer: Ayal Baron
Gerrit-Reviewer: Dan Kenigsberg <danken(a)redhat.com>
Gerrit-Reviewer: David Naori <dnaori(a)redhat.com>
Gerrit-Reviewer: Erez Sh <erez(a)redhat.com>
Gerrit-Reviewer: Saggi Mizrahi <smizrahi(a)redhat.com>
Dan Kenigsberg has posted comments on this change.
Change subject: BZ#720425 - No longer using the return value from wait, because there is None.
......................................................................
Patch Set 1: Looks good to me, approved
--
To view, visit http://gerrit.usersys/684
To unsubscribe, visit http://gerrit.usersys/settings
Gerrit-MessageType: comment
Gerrit-Change-Id: I1f6f22fb4e047dc24335eeef7664c33e1076eb02
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Erez Sh <erez(a)redhat.com>
Gerrit-Reviewer: Dan Kenigsberg <danken(a)redhat.com>
Gerrit-Reviewer: Saggi Mizrahi <smizrahi(a)redhat.com>
New patch submitted by David Naori (dnaori(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/676
commit 8e24c353d97f53ba139a504d11ac80891c3e4cff
Author: David Naori <dnaori(a)redhat.com>
Date: Thu Jul 7 16:21:15 2011 +0300
Related to BZ#719516: Make list nicer and include listNames inside.
Change-Id: I0753685d3032e79cc3f5de888b8ee7e865954d9c
diff --git a/vdsm_cli/vdsClient.py b/vdsm_cli/vdsClient.py
index 38640cc..32221a7 100755
--- a/vdsm_cli/vdsClient.py
+++ b/vdsm_cli/vdsClient.py
@@ -173,47 +173,40 @@ class service:
return self.ExecAndExit(self.s.changeFloppy(vmId, file))
def do_list(self, args):
- table=False
- if len(args):
- if args[0] == 'table':
- table=True
- response = self.s.list(True)
- if response['status']['code'] != 0:
- print response['status']['message']
- else:
- if table:
+ vmListViews = ['table', 'long', 'ids']
+ view = 'long' #Default view
+
+ if args:
+ view = args[0]
+ if view not in vmListViews:
+ raise ValueError('Invalid argument "%s".' % args[0])
+ if view == 'table':
allStats = {}
for s in self.s.getAllVmStats()['statsList']:
- allStats[s['vmId']] = s
- for conf in response['vmList']:
- if table:
- id = conf['vmId']
- if id not in allStats:
- continue
- status = conf['status']
- if allStats[id].get('monitorResponse') == '-1':
- status += '*'
- print "%-36s %6s %-20s %-20s %-20s" % (id,
- conf.get('pid', 'none'),
- conf.get('vmName', '<< NO NAME >>'),
- status, allStats[id].get('guestIPs', '') )
- else:
- if 'sysprepInf' in conf:
- conf['sysprepInf'] = '<<exists>>'
- printConf(conf)
- sys.exit(response['status']['code'])
+ allStats[ s['vmId'] ] = s
+
+ response = self.s.list(True)
+ for conf in response['vmList']:
+ if view == 'long':
+ if 'sysprepInf' in conf:
+ conf['sysprepInf'] = '<<exists>>'
+ printConf(conf)
+
+ elif view == 'table':
+ vmId = conf['vmId']
+ if vmId not in allStats:
+ continue
+ status = conf['status']
+ if allStats[vmId].get('monitorResponse') == '-1':
+ status += '*'
+ print "%-36s %6s %-20s %-20s %-20s" % ( vmId,
+ conf.get('pid', 'none'),
+ conf.get('vmName', '<< NO NAME >>'),
+ status, allStats[vmId].get('guestIPs', '') )
+
+ elif view == 'ids':
+ print conf['vmId']
- def do_listNames(self, args):
- response = self.s.list()
- if response['status']['code'] != 0:
- print response['status']['message']
- else:
- names = []
- for conf in response['vmList']:
- names.append(conf['vmId'])
- names.sort()
- if names:
- print '\n'.join(names)
sys.exit(response['status']['code'])
def do_destroy(self, args):
@@ -1525,12 +1518,12 @@ if __name__ == '__main__':
'Stops the emulation and graceful shutdown the virtual machine.'
)),
'list' : ( serv.do_list,
- ('[table]',
- 'Lists all available machines on the specified server and all available configuration info',
- 'If table modifier added then show table with the fields: vmId vmName Status IP'
- )),
- 'listNames' : ( serv.do_listNames,
- ('Lists all available machines on the specified server',''
+ ('[view]',
+ 'Lists all available machines on the specified server.',
+ 'Optional views:',
+ ' "long" all available configuration info (Default).',
+ ' "table" table output with the fields: vmId, vmName, Status and IP.',
+ ' "ids" all vmIds.'
)),
'pause' : ( serv.do_pause,
('<vmId>',
Erez Sh has posted comments on this change.
Change subject: Related to BZ#719516: Make list nicer and include listNames inside.
......................................................................
Patch Set 7: Looks good to me, but someone else must approve
--
To view, visit http://gerrit.usersys/676
To unsubscribe, visit http://gerrit.usersys/settings
Gerrit-MessageType: comment
Gerrit-Change-Id: I0753685d3032e79cc3f5de888b8ee7e865954d9c
Gerrit-PatchSet: 7
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: David Naori <dnaori(a)redhat.com>
Gerrit-Reviewer: Ayal Baron
Gerrit-Reviewer: Dan Kenigsberg <danken(a)redhat.com>
Gerrit-Reviewer: David Naori <dnaori(a)redhat.com>
Gerrit-Reviewer: Erez Sh <erez(a)redhat.com>
Gerrit-Reviewer: Saggi Mizrahi <smizrahi(a)redhat.com>
New patch submitted by David Naori (dnaori(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/676
commit b2bf639c6d47e89b4eef9c87345e63a7c142433b
Author: David Naori <dnaori(a)redhat.com>
Date: Thu Jul 7 16:21:15 2011 +0300
Related to BZ#719516: Make list nicer and include listNames inside.
Change-Id: I0753685d3032e79cc3f5de888b8ee7e865954d9c
diff --git a/vdsm_cli/vdsClient.py b/vdsm_cli/vdsClient.py
index 38640cc..3aa88ec 100755
--- a/vdsm_cli/vdsClient.py
+++ b/vdsm_cli/vdsClient.py
@@ -173,47 +173,40 @@ class service:
return self.ExecAndExit(self.s.changeFloppy(vmId, file))
def do_list(self, args):
- table=False
- if len(args):
- if args[0] == 'table':
- table=True
- response = self.s.list(True)
- if response['status']['code'] != 0:
- print response['status']['message']
- else:
- if table:
+ vmListViews = ['table', 'long', 'ids']
+ view = 'long' #Default view
+
+ if args:
+ view = args[0]
+ if view not in vmListViews:
+ raise ValueError()
+ if view == 'table':
allStats = {}
for s in self.s.getAllVmStats()['statsList']:
- allStats[s['vmId']] = s
- for conf in response['vmList']:
- if table:
- id = conf['vmId']
- if id not in allStats:
- continue
- status = conf['status']
- if allStats[id].get('monitorResponse') == '-1':
- status += '*'
- print "%-36s %6s %-20s %-20s %-20s" % (id,
- conf.get('pid', 'none'),
- conf.get('vmName', '<< NO NAME >>'),
- status, allStats[id].get('guestIPs', '') )
- else:
- if 'sysprepInf' in conf:
- conf['sysprepInf'] = '<<exists>>'
- printConf(conf)
- sys.exit(response['status']['code'])
+ allStats[ s['vmId'] ] = s
+
+ response = self.s.list(True)
+ for conf in response['vmList']:
+ if view == 'long':
+ if 'sysprepInf' in conf:
+ conf['sysprepInf'] = '<<exists>>'
+ printConf(conf)
+
+ elif view == 'table':
+ vmId = conf['vmId']
+ if vmId not in allStats:
+ continue
+ status = conf['status']
+ if allStats[vmId].get('monitorResponse') == '-1':
+ status += '*'
+ print "%-36s %6s %-20s %-20s %-20s" % ( vmId,
+ conf.get('pid', 'none'),
+ conf.get('vmName', '<< NO NAME >>'),
+ status, allStats[vmId].get('guestIPs', '') )
+
+ elif view == 'ids':
+ print conf['vmId']
- def do_listNames(self, args):
- response = self.s.list()
- if response['status']['code'] != 0:
- print response['status']['message']
- else:
- names = []
- for conf in response['vmList']:
- names.append(conf['vmId'])
- names.sort()
- if names:
- print '\n'.join(names)
sys.exit(response['status']['code'])
def do_destroy(self, args):
@@ -1525,12 +1518,12 @@ if __name__ == '__main__':
'Stops the emulation and graceful shutdown the virtual machine.'
)),
'list' : ( serv.do_list,
- ('[table]',
- 'Lists all available machines on the specified server and all available configuration info',
- 'If table modifier added then show table with the fields: vmId vmName Status IP'
- )),
- 'listNames' : ( serv.do_listNames,
- ('Lists all available machines on the specified server',''
+ ('[view]',
+ 'Lists all available machines on the specified server.',
+ 'Optional views:',
+ ' "long" all available configuration info (Default).',
+ ' "table" table output with the fields: vmId, vmName, Status and IP.',
+ ' "ids" all vmIds.'
)),
'pause' : ( serv.do_pause,
('<vmId>',
New patch submitted by David Naori (dnaori(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/676
commit f63d90a13ce03681ed284a632498f9345a8bbc75
Author: David Naori <dnaori(a)redhat.com>
Date: Thu Jul 7 16:21:15 2011 +0300
Related to BZ#719516 vdsClient: Make list nicer.
Change-Id: I0753685d3032e79cc3f5de888b8ee7e865954d9c
diff --git a/vdsm_cli/vdsClient.py b/vdsm_cli/vdsClient.py
index 38640cc..f539fc2 100755
--- a/vdsm_cli/vdsClient.py
+++ b/vdsm_cli/vdsClient.py
@@ -173,47 +173,40 @@ class service:
return self.ExecAndExit(self.s.changeFloppy(vmId, file))
def do_list(self, args):
- table=False
- if len(args):
- if args[0] == 'table':
- table=True
- response = self.s.list(True)
- if response['status']['code'] != 0:
- print response['status']['message']
- else:
- if table:
+ vmListViews = ['table', 'long', 'ids']
+ view = 'long' #Default view
+
+ if args:
+ view = args[0]
+ if view not in vmListViews:
+ raise ValueError
+ if view == 'table':
allStats = {}
for s in self.s.getAllVmStats()['statsList']:
- allStats[s['vmId']] = s
- for conf in response['vmList']:
- if table:
- id = conf['vmId']
- if id not in allStats:
- continue
- status = conf['status']
- if allStats[id].get('monitorResponse') == '-1':
- status += '*'
- print "%-36s %6s %-20s %-20s %-20s" % (id,
- conf.get('pid', 'none'),
- conf.get('vmName', '<< NO NAME >>'),
- status, allStats[id].get('guestIPs', '') )
- else:
- if 'sysprepInf' in conf:
- conf['sysprepInf'] = '<<exists>>'
- printConf(conf)
- sys.exit(response['status']['code'])
+ allStats[ s['vmId'] ] = s
+
+ response = self.s.list(True)
+ for conf in response['vmList']:
+ if view == 'long':
+ if 'sysprepInf' in conf:
+ conf['sysprepInf'] = '<<exists>>'
+ printConf(conf)
+
+ elif view == 'table':
+ vmId = conf['vmId']
+ if vmId not in allStats:
+ continue
+ status = conf['status']
+ if allStats[vmId].get('monitorResponse') == '-1':
+ status += '*'
+ print "%-36s %6s %-20s %-20s %-20s" % ( vmId,
+ conf.get('pid', 'none'),
+ conf.get('vmName', '<< NO NAME >>'),
+ status, allStats[vmId].get('guestIPs', '') )
+
+ elif view == 'ids':
+ print conf['vmId']
- def do_listNames(self, args):
- response = self.s.list()
- if response['status']['code'] != 0:
- print response['status']['message']
- else:
- names = []
- for conf in response['vmList']:
- names.append(conf['vmId'])
- names.sort()
- if names:
- print '\n'.join(names)
sys.exit(response['status']['code'])
def do_destroy(self, args):
@@ -1525,12 +1518,12 @@ if __name__ == '__main__':
'Stops the emulation and graceful shutdown the virtual machine.'
)),
'list' : ( serv.do_list,
- ('[table]',
- 'Lists all available machines on the specified server and all available configuration info',
- 'If table modifier added then show table with the fields: vmId vmName Status IP'
- )),
- 'listNames' : ( serv.do_listNames,
- ('Lists all available machines on the specified server',''
+ ('[view]',
+ 'Lists all available machines on the specified server.',
+ 'Optional views:',
+ ' "long" all available configuration info (Default).',
+ ' "table" table output with the fields: vmId, vmName, Status and IP.',
+ ' "ids" all vmIds.'
)),
'pause' : ( serv.do_pause,
('<vmId>',
New patch submitted by Igor Lvovsky (ilvovsky(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/646
commit f8e9eccdad40b074e4eb9a24f038635bbba2c48b
Author: Igor Lvovsky <ilvovsky(a)redhat.com>
Date: Tue Jul 5 16:00:21 2011 +0300
BZ#717952 - Fix race in moveImage with overwrite of VM with several disks on NFS domains
Change-Id: I367e83a700e84be0e4cb0ca5b7a02416cb54ac25
diff --git a/vdsm/storage/image.py b/vdsm/storage/image.py
index 51e237b..c1e3ec9 100644
--- a/vdsm/storage/image.py
+++ b/vdsm/storage/image.py
@@ -378,6 +378,10 @@ class Image:
# Avoid relink templates for SAN domains
if destDom.getStorageType() in [ sd.NFS_DOMAIN ]:
vol = destDom.produceVolume(imgUUID=imgUUID, volUUID=volUUID)
+ # Relink templates only
+ if not vol.isShared():
+ self.log.debug("Doesn't relink regular volume %s of image %s", volUUID, imgUUID)
+ return
chList = vol.getAllChildrenList(self.repoPath, destDom.sdUUID, imgUUID, volUUID)
for ch in chList:
# Remove hardlink of this template
New patch submitted by Yotam Oron (yoron(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/634
commit c4bac51ca8afabd41144097bed81068e76b75f44
Author: Yotam Oron <yoron(a)redhat.com>
Date: Mon Jun 27 15:22:39 2011 +0300
BZ#705058 - Don't start monitoring domains when reconstructing master
During master reconstruction there is no point in updating the repo stats
monitoring threads (doing so causes a repo stats thread reference not to
be inserted into the list of monitoring threads, and hence it leaks)
Change-Id: I05914b8c3a8f304e0acda5463e1ad00630c3233b
diff --git a/vdsm/storage/sp.py b/vdsm/storage/sp.py
index c377ee4..543984c 100644
--- a/vdsm/storage/sp.py
+++ b/vdsm/storage/sp.py
@@ -1156,7 +1156,6 @@ class StoragePool:
del domDict[sdUUID]
self.setMetaParam(PMDK_DOMAINS, domDict)
self.log.info("Set storage pool domains: %s", domDict)
- self.updateMonitoringThreads()
if refresh:
self.refresh()
New patch submitted by Igor Lvovsky (ilvovsky(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/646
commit 64badb305015f011ee2f0d84daa39563f3f17db4
Author: Igor Lvovsky <ilvovsky(a)redhat.com>
Date: Tue Jul 5 16:00:21 2011 +0300
BZ#717952 - Fix race in moveImage with overwrite of VM with several disks on NFS domains
Change-Id: I367e83a700e84be0e4cb0ca5b7a02416cb54ac25
diff --git a/vdsm/storage/image.py b/vdsm/storage/image.py
index 51e237b..c1e3ec9 100644
--- a/vdsm/storage/image.py
+++ b/vdsm/storage/image.py
@@ -378,6 +378,10 @@ class Image:
# Avoid relink templates for SAN domains
if destDom.getStorageType() in [ sd.NFS_DOMAIN ]:
vol = destDom.produceVolume(imgUUID=imgUUID, volUUID=volUUID)
+ # Relink templates only
+ if not vol.isShared():
+ self.log.debug("Doesn't relink regular volume %s of image %s", volUUID, imgUUID)
+ return
chList = vol.getAllChildrenList(self.repoPath, destDom.sdUUID, imgUUID, volUUID)
for ch in chList:
# Remove hardlink of this template
New patch submitted by Igor Lvovsky (ilvovsky(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/646
commit 06bc73dd65b05a9d76938e36c93e7f12bcae0b4a
Author: Igor Lvovsky <ilvovsky(a)redhat.com>
Date: Tue Jul 5 16:00:21 2011 +0300
BZ#717952 - Fix race in moveImage with overwrite of VM with several disks on NFS domains
Change-Id: I367e83a700e84be0e4cb0ca5b7a02416cb54ac25
diff --git a/vdsm/storage/image.py b/vdsm/storage/image.py
index ccad167..c1e3ec9 100644
--- a/vdsm/storage/image.py
+++ b/vdsm/storage/image.py
@@ -378,7 +378,11 @@ class Image:
# Avoid relink templates for SAN domains
if destDom.getStorageType() in [ sd.NFS_DOMAIN ]:
vol = destDom.produceVolume(imgUUID=imgUUID, volUUID=volUUID)
- chList = vol.getAllChildrenList(self.repoPath, sdUUID, imgUUID, volUUID)
+ # Relink templates only
+ if not vol.isShared():
+ self.log.debug("Doesn't relink regular volume %s of image %s", volUUID, imgUUID)
+ return
+ chList = vol.getAllChildrenList(self.repoPath, destDom.sdUUID, imgUUID, volUUID)
for ch in chList:
# Remove hardlink of this template
v = destDom.produceVolume(imgUUID=ch['imgUUID'], volUUID=volUUID)
New patch submitted by David Naori (dnaori(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/676
commit 514f444578aebfe3174225cb331e82ae9b3728c2
Author: David Naori <dnaori(a)redhat.com>
Date: Thu Jul 7 16:21:15 2011 +0300
Related to BZ#719516 vdsClient: Make list nicer.
Change-Id: I0753685d3032e79cc3f5de888b8ee7e865954d9c
diff --git a/vdsm_cli/vdsClient.py b/vdsm_cli/vdsClient.py
index 38640cc..2a73785 100755
--- a/vdsm_cli/vdsClient.py
+++ b/vdsm_cli/vdsClient.py
@@ -173,34 +173,31 @@ class service:
return self.ExecAndExit(self.s.changeFloppy(vmId, file))
def do_list(self, args):
- table=False
- if len(args):
- if args[0] == 'table':
- table=True
+ table = {}
+ if args:
+ if args != ['table']:
+ raise ValueError
+ for s in self.s.getAllVmStats()['statsList']:
+ table[ s['vmId'] ] = s
+
response = self.s.list(True)
- if response['status']['code'] != 0:
- print response['status']['message']
- else:
+ for conf in response['vmList']:
if table:
- allStats = {}
- for s in self.s.getAllVmStats()['statsList']:
- allStats[s['vmId']] = s
- for conf in response['vmList']:
- if table:
- id = conf['vmId']
- if id not in allStats:
- continue
- status = conf['status']
- if allStats[id].get('monitorResponse') == '-1':
- status += '*'
- print "%-36s %6s %-20s %-20s %-20s" % (id,
- conf.get('pid', 'none'),
- conf.get('vmName', '<< NO NAME >>'),
- status, allStats[id].get('guestIPs', '') )
- else:
- if 'sysprepInf' in conf:
- conf['sysprepInf'] = '<<exists>>'
- printConf(conf)
+ vmId = conf['vmId']
+ if vmId not in table:
+ continue
+ status = conf['status']
+ if table[vmId].get('monitorResponse') == '-1':
+ status += '*'
+ print "%-36s %6s %-20s %-20s %-20s" % ( vmId,
+ conf.get('pid', 'none'),
+ conf.get('vmName', '<< NO NAME >>'),
+ status, table[vmId].get('guestIPs', '') )
+ else:
+ if 'sysprepInf' in conf:
+ conf['sysprepInf'] = '<<exists>>'
+ printConf(conf)
+
sys.exit(response['status']['code'])
def do_listNames(self, args):
New patch submitted by Sanjay Mehrotra (smehrotr(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/675
commit af9ee33d5c26113c4a09af45a865fecc268ece95
Author: Sanjay Mehrotra <smehrotr(a)redhat.com>
Date: Thu Jul 7 16:09:39 2011 +0300
BZ#694026 initctl libvirtd as part of its configuration
vdsm used to control libvirtd using the System V init script. The changes have been made
so that vdsm start uses initctl to start libvirtd. If libvirtd is already managed by initctl,
it will be started if it is not running. If initctl fails, vdsm will fall back to the sysv
init script. This is patch 1 of the changes. The second patch will revert libvirtd to its
default sys v setup during uninstall of vdsm.
Change-Id: I0ea7b64a710b6e2c48fb96da82ebea64fd0482a4
diff --git a/vdsm.spec.in b/vdsm.spec.in
index 8f01acc..85a11ed 100644
--- a/vdsm.spec.in
+++ b/vdsm.spec.in
@@ -151,6 +151,12 @@ then
/usr/sbin/saslpasswd2 -p -a libvirt vdsm@rhevh < "$pfile"
fi
+# copy the libvirtd upstart job control.
+upstart=`rpm -ql libvirt | grep libvirtd.upstart | head --lines=1`
+if [ -f "$upstart" ]; then
+ cp -f $upstart /etc/vdsm/libvirtd.upstart
+fi
+
%preun
if [ "$1" -eq 0 ]
then
@@ -180,6 +186,14 @@ _EOF
if selinuxenabled; then
setsebool virt_use_nfs off
fi
+
+ if initctl list | grep -q libvirtd; then
+ initctl stop libvirtd
+ rm -f /etc/init/libvirtd.conf
+ chkconfig libvirtd on
+ service libvirtd condrestart
+ rm -f /etc/vdsm/libvirtd.upstart
+ fi
fi
%postun
diff --git a/vdsm/vdsmd b/vdsm/vdsmd
index 6b35e23..53705b3 100755
--- a/vdsm/vdsmd
+++ b/vdsm/vdsmd
@@ -30,7 +30,7 @@ RESPAWNPIDFILE=@P_VDSM_RUN@/respawn.pid
CORE_DUMP_PATH=/var/log/core/core.%p.%t.dump
DOM_METADATA_BACKUP_DIR=/var/log/vdsm/backup
CORE_PATTERN=/proc/sys/kernel/core_pattern
-NEEDED_SERVICES="iscsid multipathd libvirtd"
+NEEDED_SERVICES="iscsid multipathd"
CONFLICTING_SERVICES="libvirt-guests"
# TODO: Remove cpu cgroup disabling when BZ#623712 is resolved (kernel on
# massively multicore platforms failing to scale with cgroups turned on)
@@ -323,8 +323,6 @@ configure_libvirt()
virt_use_nfs=1
_EOF
/usr/sbin/setsebool virt_use_nfs on
-
- service libvirtd condrestart
}
RETVAL=0
@@ -343,26 +341,47 @@ reconfigure() {
configure_libvirt $args
}
+start_libvirtd() {
+ if ! initctl list | grep -q libvirtd
+ then
+ chkconfig libvirtd off && service libvirtd stop >/dev/null 2>&1
+ cp -f /etc/vdsm/libvirtd.upstart /etc/init/libvirtd.conf
+ initctl reload-configuration && initctl start libvirtd
+ fi
+
+ initctl status libvirtd | grep -q "libvirtd start" && return 0;
+
+ if initctl status libvirtd | grep -q "libvirtd stop"
+ then
+ initctl start libvirtd
+ else
+ service libvirtd condrestart
+ chkconfig libvirtd on
+ fi
+}
+
start() {
local ret_val
python @P_VDSM@/hooks.pyc before_vdsm_start
- reconfigure noforce
+ start_needed_srv
ret_val=$?
if [ $ret_val -ne 0 ]
then
- log_failure_msg "$prog: failed to reconfigure libvirt"
- return $ret_val
+ log_failure_msg "$prog: one of the dependent services did not start, error code $ret_val"
+ return $ret_val
fi
- start_needed_srv
+ reconfigure noforce
ret_val=$?
if [ $ret_val -ne 0 ]
then
- log_failure_msg "$prog: one of the dependent services did not start, error code $ret_val"
- return $ret_val
+ log_failure_msg "$prog: failed to reconfigure libvirt"
+ return $ret_val
fi
+ start_libvirtd
+
shutdown_conflicting_srv
@P_VDSM@/vdsm-restore-net-config
load_needed_modules
New patch submitted by David Naori (dnaori(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/676
commit 25b0be006a5529281dd48c8acbdb7d264f057dcd
Author: David Naori <dnaori(a)redhat.com>
Date: Thu Jul 7 16:21:15 2011 +0300
Related to BZ#719516 vdsClient: Make list nicer.
Change-Id: I0753685d3032e79cc3f5de888b8ee7e865954d9c
diff --git a/vdsm_cli/vdsClient.py b/vdsm_cli/vdsClient.py
index 38640cc..9a63129 100755
--- a/vdsm_cli/vdsClient.py
+++ b/vdsm_cli/vdsClient.py
@@ -173,34 +173,34 @@ class service:
return self.ExecAndExit(self.s.changeFloppy(vmId, file))
def do_list(self, args):
- table=False
- if len(args):
- if args[0] == 'table':
- table=True
+ table = {}
+ if args:
+ if args != ['table']:
+ raise ValueError
+ for s in self.s.getAllVmStats()['statsList']:
+ table[ s['vmId'] ] = s
+
response = self.s.list(True)
- if response['status']['code'] != 0:
- print response['status']['message']
- else:
+ if response['status']['code']:
+ return response['status']['code'], response['status']['message']
+
+ for conf in response['vmList']:
if table:
- allStats = {}
- for s in self.s.getAllVmStats()['statsList']:
- allStats[s['vmId']] = s
- for conf in response['vmList']:
- if table:
- id = conf['vmId']
- if id not in allStats:
- continue
- status = conf['status']
- if allStats[id].get('monitorResponse') == '-1':
- status += '*'
- print "%-36s %6s %-20s %-20s %-20s" % (id,
- conf.get('pid', 'none'),
- conf.get('vmName', '<< NO NAME >>'),
- status, allStats[id].get('guestIPs', '') )
- else:
- if 'sysprepInf' in conf:
- conf['sysprepInf'] = '<<exists>>'
- printConf(conf)
+ vmId = conf['vmId']
+ if vmId not in table:
+ continue
+ status = conf['status']
+ if table[vmId].get('monitorResponse') == '-1':
+ status += '*'
+ print "%-36s %6s %-20s %-20s %-20s" % ( vmId,
+ conf.get('pid', 'none'),
+ conf.get('vmName', '<< NO NAME >>'),
+ status, table[vmId].get('guestIPs', '') )
+ else:
+ if 'sysprepInf' in conf:
+ conf['sysprepInf'] = '<<exists>>'
+ printConf(conf)
+
sys.exit(response['status']['code'])
def do_listNames(self, args):
New patch submitted by David Naori (dnaori(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/671
commit 07456abc7df872f6f910abd6ed3a95f321541eb3
Author: David Naori <dnaori(a)redhat.com>
Date: Thu Jul 7 09:24:20 2011 +0300
BZ#719516 vdsClient: avoid possible race in list table.
Change-Id: I7fc52ac0428fd0a2128376a97d56df0fc41ff8ac
diff --git a/vdsm_cli/vdsClient.py b/vdsm_cli/vdsClient.py
index 903dc46..38640cc 100755
--- a/vdsm_cli/vdsClient.py
+++ b/vdsm_cli/vdsClient.py
@@ -188,6 +188,8 @@ class service:
for conf in response['vmList']:
if table:
id = conf['vmId']
+ if id not in allStats:
+ continue
status = conf['status']
if allStats[id].get('monitorResponse') == '-1':
status += '*'
@@ -201,7 +203,6 @@ class service:
printConf(conf)
sys.exit(response['status']['code'])
-
def do_listNames(self, args):
response = self.s.list()
if response['status']['code'] != 0:
Patch was abandoned by David Naori (dnaori(a)redhat.com) because: Can't push the previous patch again.
You can review this change at: http://gerrit.usersys.redhat.com/671
New patch submitted by Igor Lvovsky (ilvovsky(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/665
commit 03c28923ea2866df84db866c90efd8a0768bdcba
Author: Igor Lvovsky <ilvovsky(a)redhat.com>
Date: Wed Jul 6 13:52:45 2011 +0300
BZ#719255 - Release lock if prepare volume failed
Change-Id: I57a78ff800f0b8f006ca9c4fed3bb6ac185a55ed
diff --git a/vdsm/storage/hsm.py b/vdsm/storage/hsm.py
index 7081558..b0d5d21 100644
--- a/vdsm/storage/hsm.py
+++ b/vdsm/storage/hsm.py
@@ -1663,12 +1663,17 @@ class HSM:
imageResourcesNamespace = sd.getNamespace(sdUUID, IMAGE_NAMESPACE)
imgResource = rmanager.acquireResource(imageResourcesNamespace, imgUUID, rm.LockType.exclusive)
imgResource.autoRelease = False
- vol = SDF.produce(sdUUID=sdUUID).produceVolume(imgUUID=imgUUID, volUUID=volUUID)
- # NB We want to be sure that at this point HSM does not use stale LVM
- # cache info, so we call refresh explicitely. We may want to remove
- # this refresh later, when we come up with something better.
- vol.refreshVolume()
- vol.prepare(rw=rw)
+ try:
+ vol = SDF.produce(sdUUID=sdUUID).produceVolume(imgUUID=imgUUID, volUUID=volUUID)
+ # NB We want to be sure that at this point HSM does not use stale LVM
+ # cache info, so we call refresh explicitely. We may want to remove
+ # this refresh later, when we come up with something better.
+ vol.refreshVolume()
+ vol.prepare(rw=rw)
+ except:
+ imgResource.autoRelease = True
+ self.log.error("Prepare volume %s in domain %s failed", volUUID, sdUUID, exc_info=True)
+ raise
def public_teardownVolume(self, sdUUID, spUUID, imgUUID, volUUID, rw=False, options = None):
New patch submitted by Eduardo Warszawski (ewarszaw(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/674
commit 705c8ae081f275c5d0f54d117d0addbb524a8403
Author: Eduardo Warszawski <ewarszaw(a)redhat.com>
Date: Thu Jul 7 16:04:36 2011 +0300
Fix vgs reload for stale VGs.
Stale VG: A VG that is in the cache but not in the LVM data.
Change-Id: Icfdf59db6488a0f4544d4fc7ad77fc927c8fd23b
diff --git a/vdsm/storage/lvm.py b/vdsm/storage/lvm.py
index 8a89a01..ef60c02 100644
--- a/vdsm/storage/lvm.py
+++ b/vdsm/storage/lvm.py
@@ -436,7 +436,7 @@ class LVMInfo(object):
for staleName in staleVGs:
removeVgMapping(staleName)
log.warning("Removing stale VG: %s", staleName)
- self._lvs.pop((staleName), None)
+ self._vgs.pop((staleName), None)
return updatedVGs
New patch submitted by David Naori (dnaori(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/671
commit 1e17c8040d35507e796ff576c6f864a536b568ba
Author: David Naori <dnaori(a)redhat.com>
Date: Thu Jul 7 09:24:20 2011 +0300
BZ#719516 vdsClient: avoid possible race in list table.
Change-Id: I7fc52ac0428fd0a2128376a97d56df0fc41ff8ac
diff --git a/vdsm_cli/vdsClient.py b/vdsm_cli/vdsClient.py
index 903dc46..38640cc 100755
--- a/vdsm_cli/vdsClient.py
+++ b/vdsm_cli/vdsClient.py
@@ -188,6 +188,8 @@ class service:
for conf in response['vmList']:
if table:
id = conf['vmId']
+ if id not in allStats:
+ continue
status = conf['status']
if allStats[id].get('monitorResponse') == '-1':
status += '*'
@@ -201,7 +203,6 @@ class service:
printConf(conf)
sys.exit(response['status']['code'])
-
def do_listNames(self, args):
response = self.s.list()
if response['status']['code'] != 0:
New patch submitted by Eduardo Warszawski (ewarszaw(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/662
commit 8e2a1b9332d66cde99b46de62f7ea5d4efad641d
Author: Eduardo Warszawski <ewarszaw(a)redhat.com>
Date: Tue Jul 5 18:05:39 2011 +0300
BZ#718993 - Remove vgchange --refresh
vgchange --refresh:
If any logical volume in the volume group is active, reload its metadata.
vgchange --refresh may take too long when there are many LVs in the VG.
vgchange --refresh was NOT needed for refreshLV after migration in bug 651803,
since the LV metadata is reloaded using lvchange --refresh.
Change-Id: I123480a2f687bbfacaf8308fbfa3d0a7f5c29c47
diff --git a/vdsm/storage/blockSD.py b/vdsm/storage/blockSD.py
index 9f744b2..0c00dd3 100644
--- a/vdsm/storage/blockSD.py
+++ b/vdsm/storage/blockSD.py
@@ -105,7 +105,7 @@ class VGTagMetadataRW(object):
self._vgName = vgName
def readlines(self):
- lvm.refreshVG(self._vgName)
+ lvm.invalidateVG(self._vgName)
vg = lvm.getVG(self._vgName)
metadata = []
for tag in vg.tags:
@@ -921,7 +921,7 @@ class BlockStorageDomain(sd.StorageDomain):
def refresh(self):
self.refreshDirTree()
- lvm.refreshVG(self.sdUUID)
+ lvm.invalidateVG(self.sdUUID)
self._metadata = selectMetadata(self.sdUUID)
@staticmethod
diff --git a/vdsm/storage/lvm.py b/vdsm/storage/lvm.py
index d6159eb..8a89a01 100644
--- a/vdsm/storage/lvm.py
+++ b/vdsm/storage/lvm.py
@@ -881,14 +881,9 @@ def deactivateVG(vgName):
_setVgAvailability(vgName, available="n")
-def refreshVG(vgName):
- #If any logical volume in the volume group is active, reload its metadata.
- cmd = ['vgchange', '--refresh', vgName]
- rc, out, err = _lvminfo.cmd(cmd)
+def invalidateVG(vgName):
_lvminfo._invalidatevgs(vgName)
_lvminfo._invalidatelvs(vgName)
- if rc != 0:
- raise se.LogicalVolumeRefreshError("vgchange --refresh %s failed" % (vgName,))
#
New patch submitted by Igor Lvovsky (ilvovsky(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/665
commit 700bc5898fa71ae91b8e7ac2cfdd4804d40cdab4
Author: Igor Lvovsky <ilvovsky(a)redhat.com>
Date: Wed Jul 6 13:52:45 2011 +0300
BZ#719255 - Release lock if prepare volume failed
Change-Id: I57a78ff800f0b8f006ca9c4fed3bb6ac185a55ed
diff --git a/vdsm/storage/hsm.py b/vdsm/storage/hsm.py
index 7081558..14c281e 100644
--- a/vdsm/storage/hsm.py
+++ b/vdsm/storage/hsm.py
@@ -1663,12 +1663,17 @@ class HSM:
imageResourcesNamespace = sd.getNamespace(sdUUID, IMAGE_NAMESPACE)
imgResource = rmanager.acquireResource(imageResourcesNamespace, imgUUID, rm.LockType.exclusive)
imgResource.autoRelease = False
- vol = SDF.produce(sdUUID=sdUUID).produceVolume(imgUUID=imgUUID, volUUID=volUUID)
- # NB We want to be sure that at this point HSM does not use stale LVM
- # cache info, so we call refresh explicitely. We may want to remove
- # this refresh later, when we come up with something better.
- vol.refreshVolume()
- vol.prepare(rw=rw)
+ try:
+ vol = SDF.produce(sdUUID=sdUUID).produceVolume(imgUUID=imgUUID, volUUID=volUUID)
+ # NB We want to be sure that at this point HSM does not use stale LVM
+ # cache info, so we call refresh explicitely. We may want to remove
+ # this refresh later, when we come up with something better.
+ vol.refreshVolume()
+ vol.prepare(rw=rw)
+ except Exception:
+ imgResource.autoRelease = True
+ self.log.error("Prepare volume %s in domain %s failed", volUUID, sdUUID, exc_info=True)
+ raise
def public_teardownVolume(self, sdUUID, spUUID, imgUUID, volUUID, rw=False, options = None):
New patch submitted by David Naori (dnaori(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/667
commit 62c25ccb5a6633e76289e60f926d507ec614ac11
Author: David Naori <dnaori(a)redhat.com>
Date: Wed Jul 6 15:09:39 2011 +0300
BZ#719301 clientIF: fix log typo.
Change-Id: Iae1a8ba75df6e85b22184fe0fd9e7ccbecbb9b90
diff --git a/vdsm/clientIF.py b/vdsm/clientIF.py
index c1219e4..c5169a4 100644
--- a/vdsm/clientIF.py
+++ b/vdsm/clientIF.py
@@ -433,7 +433,7 @@ class clientIF:
Destroy the specified VM.
"""
self.vmContainerLock.acquire()
- self.log.info("vmContainerLock aquired by vm %s", vmId)
+ self.log.info("vmContainerLock acquired by vm %s", vmId)
try:
v = self.vmContainer.get(vmId)
if not v:
@@ -639,7 +639,7 @@ class clientIF:
vmParams['displayIp'] = self._getNetworkIp(vmParams.get(
'displayNetwork'))
self.vmContainerLock.acquire()
- self.log.info("vmContainerLock aquired by vm %s", vmParams['vmId'])
+ self.log.info("vmContainerLock acquired by vm %s", vmParams['vmId'])
try:
if 'recover' not in vmParams:
if vmParams['vmId'] in self.vmContainer:
New patch submitted by Igor Lvovsky (ilvovsky(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/665
commit 25c86464dab07fd608ee326f59a253f93037672c
Author: Igor Lvovsky <ilvovsky(a)redhat.com>
Date: Wed Jul 6 13:52:45 2011 +0300
BZ#719255 - Release lock if prepare volume failed
Change-Id: I57a78ff800f0b8f006ca9c4fed3bb6ac185a55ed
diff --git a/vdsm/storage/hsm.py b/vdsm/storage/hsm.py
index 7081558..e8194af 100644
--- a/vdsm/storage/hsm.py
+++ b/vdsm/storage/hsm.py
@@ -1663,12 +1663,17 @@ class HSM:
imageResourcesNamespace = sd.getNamespace(sdUUID, IMAGE_NAMESPACE)
imgResource = rmanager.acquireResource(imageResourcesNamespace, imgUUID, rm.LockType.exclusive)
imgResource.autoRelease = False
- vol = SDF.produce(sdUUID=sdUUID).produceVolume(imgUUID=imgUUID, volUUID=volUUID)
- # NB We want to be sure that at this point HSM does not use stale LVM
- # cache info, so we call refresh explicitely. We may want to remove
- # this refresh later, when we come up with something better.
- vol.refreshVolume()
- vol.prepare(rw=rw)
+ try:
+ vol = SDF.produce(sdUUID=sdUUID).produceVolume(imgUUID=imgUUID, volUUID=volUUID)
+ # NB We want to be sure that at this point HSM does not use stale LVM
+ # cache info, so we call refresh explicitely. We may want to remove
+ # this refresh later, when we come up with something better.
+ vol.refreshVolume()
+ vol.prepare(rw=rw)
+ except Exception:
+ imgResource.autoRelease = True
+ self.log.error("Prepare volume %s in domain %s failed", volUUID, sdUUID, exc_info=True)
+ raise
def public_teardownVolume(self, sdUUID, spUUID, imgUUID, volUUID, rw=False, options = None):
@@ -1691,10 +1696,15 @@ class HSM:
self.validatePoolSD(spUUID, sdUUID)
vars.task.getSharedLock(STORAGE, sdUUID)
- volclass = SDF.produce(sdUUID).getVolumeClass()
- volclass.teardown(sdUUID=sdUUID, volUUID=volUUID)
- imageResourcesNamespace = sd.getNamespace(sdUUID, IMAGE_NAMESPACE)
- rmanager.releaseResource(imageResourcesNamespace, imgUUID)
+ try:
+ volclass = SDF.produce(sdUUID).getVolumeClass()
+ volclass.teardown(sdUUID=sdUUID, volUUID=volUUID)
+ except Exception:
+ self.log.error("Teardown volume %s in domain %s failed", volUUID, sdUUID, exc_info=True)
+ raise
+ finally:
+ imageResourcesNamespace = sd.getNamespace(sdUUID, IMAGE_NAMESPACE)
+ rmanager.releaseResource(imageResourcesNamespace, imgUUID)
def public_getVolumesList(self, sdUUID, spUUID, imgUUID=volume.BLANK_UUID, options = None):
New patch submitted by Igor Lvovsky (ilvovsky(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/646
commit 462529ce907980458db5a24849107a85ccbce9e7
Author: Igor Lvovsky <ilvovsky(a)redhat.com>
Date: Thu Jun 30 18:01:27 2011 +0300
BZ#717952 - Fix race in moveImage with overwrite of VM with several disks on NFS domains
Change-Id: I367e83a700e84be0e4cb0ca5b7a02416cb54ac25
diff --git a/vdsm/storage/image.py b/vdsm/storage/image.py
index 85f9561..2eef6a5 100644
--- a/vdsm/storage/image.py
+++ b/vdsm/storage/image.py
@@ -55,7 +55,7 @@ class Image:
Consist from chain of volumes.
"""
log = logging.getLogger('Storage.Image')
- _lock = threading.Lock()
+ _fakeTemplateLock = threading.Lock()
@classmethod
def createImageRollback(cls, taskObj, imageDir):
@@ -384,15 +384,18 @@ class Image:
self.log.error("Unexpected error", exc_info=True)
raise se.CouldNotValideTemplateOnTargetDomain("Template %s Destination domain %s: %s" % (pimg, dstSdUUID, str(e)))
- def __templateRelink(self, sdUUID, imgUUID, volUUID):
+ def __templateRelink(self, destDom, imgUUID, volUUID):
"""
Relink all hardlinks of the template 'volUUID' in all VMs based on it
"""
- destDom = SDF.produce(sdUUID)
# Avoid relink templates for SAN domains
if destDom.getStorageType() in [ sd.NFS_DOMAIN ]:
vol = destDom.produceVolume(imgUUID=imgUUID, volUUID=volUUID)
- chList = vol.getAllChildrenList(self.repoPath, sdUUID, imgUUID, volUUID)
+ # Relink templates only
+ if not vol.isShared():
+ self.log.debug("Doesn't relink regular volume %s of image %s", volUUID, imgUUID)
+ return
+ chList = vol.getAllChildrenList(self.repoPath, destDom.sdUUID, imgUUID, volUUID)
for ch in chList:
# Remove hardlink of this template
v = destDom.produceVolume(imgUUID=ch['imgUUID'], volUUID=volUUID)
@@ -400,7 +403,7 @@ class Image:
# Now we should re-link deleted hardlink, if exists
newVol = destDom.produceVolume(imgUUID=imgUUID, volUUID=volUUID)
- imageDir = self.getImageDir(sdUUID, ch['imgUUID'])
+ imageDir = self.getImageDir(destDom.sdUUID, ch['imgUUID'])
newVol.share(imageDir)
else:
self.log.debug("Doesn't relink templates non-NFS domain %s", destDom.sdUUID)
@@ -409,8 +412,7 @@ class Image:
"""
Create fake template (relevant for Backup domain only)
"""
- try:
- self._lock.acquire()
+ with self._fakeTemplateLock:
try:
destDom = SDF.produce(sdUUID)
volclass = destDom.getVolumeClass()
@@ -430,14 +432,12 @@ class Image:
# Mark fake volume as shared
vol.setShared()
# Now we should re-link all hardlinks of this template in all VMs based on it
- self.__templateRelink(sdUUID, volParams['imgUUID'], volParams['volUUID'])
+ self.__templateRelink(destDom, volParams['imgUUID'], volParams['volUUID'])
self.log.debug("Succeeded to create fake image %s in domain %s", volParams['imgUUID'], destDom.sdUUID)
except Exception:
self.log.error("Failure to create fake image %s in domain %s", volParams['imgUUID'],
destDom.sdUUID, exc_info=True)
- finally:
- self._lock.release()
def isLegal(self, sdUUID, imgUUID):
"""
@@ -593,7 +593,7 @@ class Image:
if force:
leafVol = chains['dstChain'][-1]
# Now we should re-link all deleted hardlinks, if exists
- self.__templateRelink(destDom.sdUUID, imgUUID, leafVol.volUUID)
+ self.__templateRelink(destDom, imgUUID, leafVol.volUUID)
# At this point we successfully finished the 'copy' part of the operation
# and we can clear all recoveries.
@@ -807,7 +807,7 @@ class Image:
if force:
# Now we should re-link all deleted hardlinks, if exists
- self.__templateRelink(dstSdUUID, dstImgUUID, dstVolUUID)
+ self.__templateRelink(destDom, dstImgUUID, dstVolUUID)
except se.StorageException, e:
self.log.error("Unexpected error", exc_info=True)
raise
New patch submitted by Dan Kenigsberg (danken(a)redhat.com)
You can review this change at: http://gerrit.usersys.redhat.com/645
commit dc821613120e7c42874ab607ac5536346739d826
Author: Dan Kenigsberg <danken(a)redhat.com>
Date: Sun Jul 3 11:12:42 2011 +0300
Related to BZ#716692: avoid pyflakes error
Change-Id: I0a3d179549aa744ac47749fbcac236ef2211cbe9
diff --git a/vdsm/netinfo.py b/vdsm/netinfo.py
index c26bc25..e1bbf90 100644
--- a/vdsm/netinfo.py
+++ b/vdsm/netinfo.py
@@ -147,7 +147,7 @@ def getRoutes():
gateways = dict()
with open("/proc/net/route") as route_file:
- head = route_file.readline()
+ route_file.readline() # skip header line
for route_line in route_file.xreadlines():
route_parm = route_line.rstrip().split('\t')