Change in vdsm[master]: Add qemu's memory usage to VM statistics.
by ghammer@redhat.com
Gal Hammer has uploaded a new change for review.
Change subject: Add qemu's memory usage to VM statistics.
......................................................................
Add qemu's memory usage to VM statistics.
Change-Id: Ibeb35759454c4a9b41e1303956267e93ca3545a0
Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=799285
Signed-off-by: Gal Hammer <ghammer(a)redhat.com>
---
M vdsm/config.py.in
M vdsm/libvirtvm.py
2 files changed, 14 insertions(+), 1 deletion(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/06/9006/1
diff --git a/vdsm/config.py.in b/vdsm/config.py.in
index df85e7e..ee1627b 100644
--- a/vdsm/config.py.in
+++ b/vdsm/config.py.in
@@ -111,6 +111,8 @@
('vm_sample_net_interval', '5', None),
('vm_sample_net_window', '2', None),
+
+ ('vm_sample_memory_interval', '2', None),
('trust_store_path', '@TRUSTSTORE@',
'Where the certificates and keys are situated.'),
diff --git a/vdsm/libvirtvm.py b/vdsm/libvirtvm.py
index 86e39a3..f76f35c 100644
--- a/vdsm/libvirtvm.py
+++ b/vdsm/libvirtvm.py
@@ -91,10 +91,13 @@
self._sampleNet,
config.getint('vars', 'vm_sample_net_interval'),
config.getint('vars', 'vm_sample_net_window')))
+ self.sampleMem = (utils.AdvancedStatsFunction(self._sampleMem,
+ config.getint('vars', 'vm_sample_memory_interval')))
self.addStatsFunction(
self.highWrite, self.updateVolumes, self.sampleCpu,
- self.sampleDisk, self.sampleDiskLatency, self.sampleNet)
+ self.sampleDisk, self.sampleDiskLatency, self.sampleNet,
+ self.sampleMem)
def _highWrite(self):
if not self._vm.isDisksStatsCollectionEnabled():
@@ -168,6 +171,14 @@
netSamples[nic.name] = self._vm._dom.interfaceStats(nic.name)
return netSamples
+ def _sampleMem(self):
+ memUsage = {}
+ for line in open('/proc/%d/status' %(self.conf['pid'])):
+ var, value = line.strip().split()[0:2]
+ if var in ('VmSize:', 'VmRSS:', 'VmData:'):
+ memUsage[var[:-1]] = long(value)
+ return memUsage
+
def _diff(self, prev, curr, val):
return prev[val] - curr[val]
--
To view, visit http://gerrit.ovirt.org/9006
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ibeb35759454c4a9b41e1303956267e93ca3545a0
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Gal Hammer <ghammer(a)redhat.com>
10 years
Change in vdsm[master]: [WIP] Start moving proc parsing to it's own module
by smizrahi@redhat.com
Saggi Mizrahi has uploaded a new change for review.
Change subject: [WIP] Start moving proc parsing to it's own module
......................................................................
[WIP] Start moving proc parsing to it's own module
Change-Id: I7ba84c7ece95bdef7448a7c7af277e7f58695401
Signed-off-by: Saggi Mizrahi <smizrahi(a)redhat.com>
---
M vdsm.spec.in
M vdsm/API.py
M vdsm/Makefile.am
M vdsm/caps.py
A vdsm/procfs.py
M vdsm/utils.py
M vdsm/vm.py
7 files changed, 53 insertions(+), 40 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/13/7513/1
diff --git a/vdsm.spec.in b/vdsm.spec.in
index bd01c2a..1f01961 100644
--- a/vdsm.spec.in
+++ b/vdsm.spec.in
@@ -573,6 +573,7 @@
%{_datadir}/%{vdsm_name}/supervdsmServer.py*
%{_datadir}/%{vdsm_name}/vmChannels.py*
%{_datadir}/%{vdsm_name}/vmContainer.py*
+%{_datadir}/%{vdsm_name}/procfs.py*
%{_datadir}/%{vdsm_name}/tc.py*
%{_datadir}/%{vdsm_name}/vdsm
%{_datadir}/%{vdsm_name}/vdsm-restore-net-config
diff --git a/vdsm/API.py b/vdsm/API.py
index 720c3b9..aab69cd 100644
--- a/vdsm/API.py
+++ b/vdsm/API.py
@@ -37,6 +37,7 @@
from vdsm.define import doneCode, errCode, Kbytes, Mbytes
import caps
from vdsm.config import config
+import procfs
import supervdsm
@@ -864,7 +865,7 @@
"""
def _readSwapTotalFree():
- meminfo = utils.readMemInfo()
+ meminfo = procfs.meminfo()
return meminfo['SwapTotal'] / 1024, meminfo['SwapFree'] / 1024
stats = {}
@@ -1111,17 +1112,16 @@
memCommitted = self._memCommitted()
resident = 0
for v in self._cif.vmContainer.getVMs():
- if v.conf['pid'] == '0':
- continue
try:
- statmfile = file('/proc/' + v.conf['pid'] + '/statm')
- resident += int(statmfile.read().split()[1])
+ resident += v.statm().resident
except:
pass
+
resident *= PAGE_SIZE_BYTES
- meminfo = utils.readMemInfo()
- freeOrCached = (meminfo['MemFree'] +
- meminfo['Cached'] + meminfo['Buffers']) * Kbytes
+
+ meminfo = procfs.meminfo()
+ freeOrCached = (meminfo['MemFree'] + meminfo['Cached'] +
+ meminfo['Buffers']) * Kbytes
return freeOrCached + resident - memCommitted - \
config.getint('vars', 'host_mem_reserve') * Mbytes
diff --git a/vdsm/Makefile.am b/vdsm/Makefile.am
index 574d762..1a3ac43 100644
--- a/vdsm/Makefile.am
+++ b/vdsm/Makefile.am
@@ -47,6 +47,7 @@
momIF.py \
neterrors.py \
parted_utils.py \
+ procfs.py \
pthread.py \
supervdsm.py \
supervdsmServer.py \
diff --git a/vdsm/caps.py b/vdsm/caps.py
index f1641ff..39fc837 100644
--- a/vdsm/caps.py
+++ b/vdsm/caps.py
@@ -41,6 +41,7 @@
from vdsm import utils
from vdsm import constants
import storage.hba
+import procfs
# For debian systems we can use python-apt if available
try:
@@ -271,7 +272,7 @@
caps['HBAInventory'] = storage.hba.HBAInventory()
caps['vmTypes'] = ['kvm']
- caps['memSize'] = str(utils.readMemInfo()['MemTotal'] / 1024)
+ caps['memSize'] = str(procfs.meminfo()['MemTotal'] / 1024)
caps['reservedMem'] = str(
config.getint('vars', 'host_mem_reserve') +
config.getint('vars', 'extra_mem_reserve'))
diff --git a/vdsm/procfs.py b/vdsm/procfs.py
new file mode 100644
index 0000000..29fc973
--- /dev/null
+++ b/vdsm/procfs.py
@@ -0,0 +1,31 @@
+from collections import namedtuple
+
+buffsize = 4096
+
+MemStat = namedtuple("MemStat",
+ "size, resident, share, text, UNUSED1, data, UNUSED2")
+
+
+def statm(pid):
+ """
+ Parses statm for a pid. Note all results are in pages.
+ """
+ with open("/proc/%d/statm" % pid, "rb") as f:
+ return MemStat(*(int(val) for val in f.read().split()))
+
+
+def meminfo():
+ """
+ Parse ``/proc/meminfo`` and return its content as a dictionary.
+
+ note.
+ All values are in KB
+ """
+ meminfo = {}
+ with open("/proc/meminfo", "rb") as f:
+ f.seek(0)
+ lines = f.readlines()
+ for var, val in (l.split()[0:2] for l in lines):
+ meminfo[var[:-1]] = int(val)
+
+ return meminfo
diff --git a/vdsm/utils.py b/vdsm/utils.py
index 5e2d4e5..048a528 100644
--- a/vdsm/utils.py
+++ b/vdsm/utils.py
@@ -19,7 +19,8 @@
#
"""
-A module containing miscellaneous functions and classes that are user plentifuly around vdsm.
+A module containing miscellaneous functions and classes that are used
+plentifully around vdsm.
.. attribute:: utils.symbolerror
@@ -28,7 +29,8 @@
from SimpleXMLRPCServer import SimpleXMLRPCServer
import SocketServer
import threading
-import os, time
+import os
+import time
import logging
import errno
import subprocess
@@ -42,6 +44,7 @@
import constants
from config import config
import netinfo
+import procfs
_THP_STATE_PATH = '/sys/kernel/mm/transparent_hugepage/enabled'
if not os.path.exists(_THP_STATE_PATH):
@@ -63,34 +66,6 @@
os.unlink(fileToRemove)
except:
pass
-
-def readMemInfo():
- """
- Parse ``/proc/meminfo`` and return its content as a dictionary.
-
- For a reason unknown to me, ``/proc/meminfo`` is sometimes
- empty when opened. If that happens, the function retries to open it
- 3 times.
-
- :returns: a dictionary representation of ``/proc/meminfo``
- """
- # FIXME the root cause for these retries should be found and fixed
- tries = 3
- meminfo = {}
- while True:
- tries -= 1
- try:
- lines = []
- lines = file('/proc/meminfo').readlines()
- for line in lines:
- var, val = line.split()[0:2]
- meminfo[var[:-1]] = int(val)
- return meminfo
- except:
- logging.warning(lines, exc_info=True)
- if tries <= 0:
- raise
- time.sleep(0.1)
#Threaded version of SimpleXMLRPCServer
class SimpleThreadedXMLRPCServer(SocketServer.ThreadingMixIn, SimpleXMLRPCServer):
@@ -225,7 +200,7 @@
"""
BaseSample.__init__(self, pid, ifids)
self.totcpu = TotalCpuSample()
- meminfo = readMemInfo()
+ meminfo = procfs.meminfo()
freeOrCached = (meminfo['MemFree'] +
meminfo['Cached'] + meminfo['Buffers'])
self.memUsed = 100 - int(100.0 * (freeOrCached) / meminfo['MemTotal'])
diff --git a/vdsm/vm.py b/vdsm/vm.py
index c1a22b0..bd436e0 100644
--- a/vdsm/vm.py
+++ b/vdsm/vm.py
@@ -36,6 +36,7 @@
import libvirt
from vdsm import vdscli
import caps
+import procfs
DEFAULT_BRIDGE = config.get("vars", "default_bridge")
@@ -693,6 +694,9 @@
load = len(self.cif.vmContainer.getVMs())
return base * (doubler + load) / doubler
+ def statm(self):
+ return procfs.statm(int(self.conf['pid']))
+
def saveState(self):
if self.destroyed:
return
--
To view, visit http://gerrit.ovirt.org/7513
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I7ba84c7ece95bdef7448a7c7af277e7f58695401
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Saggi Mizrahi <smizrahi(a)redhat.com>
10 years
Change in vdsm[master]: Refactor prepareVolumePath
by smizrahi@redhat.com
Saggi Mizrahi has uploaded a new change for review.
Change subject: Refactor prepareVolumePath
......................................................................
Refactor prepareVolumePath
Change-Id: I57bb8684fd11a47843a158d13fcc2815147fa7ef
Signed-off-by: Saggi Mizrahi <smizrahi(a)redhat.com>
---
M vdsm/API.py
M vdsm/clientIF.py
M vdsm/libvirtvm.py
M vdsm/storage/devicemapper.py
M vdsm/vm.py
5 files changed, 93 insertions(+), 63 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/55/7755/1
diff --git a/vdsm/API.py b/vdsm/API.py
index 720c3b9..c324d79 100644
--- a/vdsm/API.py
+++ b/vdsm/API.py
@@ -173,8 +173,7 @@
# NOTE: pickled params override command-line params. this
# might cause problems if an upgrade took place since the
# params were stored.
- fname = self._cif.prepareVolumePath(paramFilespec)
- try:
+ with self._cif.preparedDrive(paramFilespec) as fname:
with file(fname) as f:
pickledMachineParams = pickle.load(f)
@@ -183,8 +182,6 @@
+ str(pickledMachineParams))
self.log.debug('former conf ' + str(vmParams))
vmParams.update(pickledMachineParams)
- finally:
- self._cif.teardownVolumePath(paramFilespec)
except:
self.log.error("Error restoring VM parameters",
exc_info=True)
@@ -299,9 +296,15 @@
:param hiberVolHandle: opaque string, indicating the location of
hibernation images.
"""
- params = {'vmId': self._UUID, 'mode': 'file',
- 'hiberVolHandle': hibernationVolHandle}
- response = self.migrate(params)
+ v = self._getVmObject()
+ if v is None:
+ return errCode['noVM']
+
+ try:
+ response = self.hibernate(hibernationVolHandle)
+ except vm.WrongStateError:
+ response = errCode['noVM']
+
if not response['status']['code']:
response['status']['message'] = 'Hibernation process starting'
return response
diff --git a/vdsm/clientIF.py b/vdsm/clientIF.py
index 55a7fc9..0446eb2 100644
--- a/vdsm/clientIF.py
+++ b/vdsm/clientIF.py
@@ -25,6 +25,7 @@
from xml.dom import minidom
import uuid
import errno
+from contextlib import contextmanager
from storage.dispatcher import Dispatcher
from storage.hsm import HSM
@@ -44,6 +45,7 @@
import blkid
import supervdsm
import vmContainer
+from storage import devicemapper
try:
import gluster.api as gapi
_glusterEnabled = True
@@ -239,50 +241,74 @@
self.log.info('Error finding path for device', exc_info=True)
raise vm.VolumeError(uuid)
+ def _preparePoolImage(self, drive):
+ res = self.irs.prepareImage(
+ drive['domainID'], drive['poolID'],
+ drive['imageID'], drive['volumeID'])
+
+ if res['status']['code']:
+ raise vm.VolumeError(drive)
+
+ drive['volumeChain'] = res['chain']
+ return res['path']
+
+ def _prepareDmDevice(self, drive, vmId):
+ volPath = devicemapper.getDevicePathByGuid(drive["GUID"])
+
+ if not os.path.exists(volPath):
+ raise vm.VolumeError(drive)
+
+ res = self.irs.appropriateDevice(drive["GUID"], vmId)
+ if res['status']['code']:
+ raise vm.VolumeError(drive)
+
+ return volPath
+
+ def _prepareScsiDevice(self, drive):
+ return self._getUUIDSpecPath(drive["UUID"])
+
+ def _prepareVmPayload(self, drive, vmId):
+ '''
+ vmPayload is a key in specParams
+ 'vmPayload': {'file': {'filename': 'content'}}
+ '''
+ for key, files in drive['specParams']['vmPayload'].iteritems():
+ if key == 'file':
+ svdsm = supervdsm.getProxy()
+ if drive['device'] == 'cdrom':
+ return svdsm.mkIsoFs(vmId, files)
+ elif drive['device'] == 'floppy':
+ return svdsm.mkFloppyFs(vmId, files)
+
+ raise vm.VolumeError(drive)
+
+ def _preparePath(self, drive):
+ return drive['path']
+
+ @contextmanager
+ def preparedDrive(self, drive, vmId=None):
+ path = self.prepareVolumePath(drive, vmId)
+ try:
+ yield path
+ finally:
+ self.teardownVolumePath(drive, vmId)
+
def prepareVolumePath(self, drive, vmId=None):
if type(drive) is dict:
- # PDIV drive format
if drive['device'] == 'disk' and vm.isVdsmImage(drive):
- res = self.irs.prepareImage(
- drive['domainID'], drive['poolID'],
- drive['imageID'], drive['volumeID'])
+ volPath = self._preparePoolImage(drive)
- if res['status']['code']:
- raise vm.VolumeError(drive)
-
- volPath = res['path']
- drive['volumeChain'] = res['chain']
-
- # GUID drive format
elif "GUID" in drive:
- volPath = os.path.join("/dev/mapper", drive["GUID"])
+ volPath = self._prepareDmDevice(drive, vmId)
- if not os.path.exists(volPath):
- raise vm.VolumeError(drive)
-
- res = self.irs.appropriateDevice(drive["GUID"], vmId)
- if res['status']['code']:
- raise vm.VolumeError(drive)
-
- # UUID drive format
elif "UUID" in drive:
- volPath = self._getUUIDSpecPath(drive["UUID"])
+ volPath = self._prepareScsiDevice(drive)
elif 'specParams' in drive and 'vmPayload' in drive['specParams']:
- '''
- vmPayload is a key in specParams
- 'vmPayload': {'file': {'filename': 'content'}}
- '''
- for key, files in drive['specParams']['vmPayload'].iteritems():
- if key == 'file':
- if drive['device'] == 'cdrom':
- volPath = supervdsm.getProxy().mkIsoFs(vmId, files)
- elif drive['device'] == 'floppy':
- volPath = \
- supervdsm.getProxy().mkFloppyFs(vmId, files)
+ volPath = self._prepareVmPayload(drive, vmId)
elif "path" in drive:
- volPath = drive['path']
+ volPath = self._preparePath(drive)
else:
raise vm.VolumeError(drive)
@@ -301,17 +327,22 @@
self.log.info("prepared volume path: %s", volPath)
return volPath
- def teardownVolumePath(self, drive):
- res = {'status': doneCode}
- if type(drive) == dict:
- try:
- res = self.irs.teardownImage(drive['domainID'],
- drive['poolID'], drive['imageID'])
- except KeyError:
- #This drive is not a vdsm image (quartet)
- self.log.info("Avoiding tear down drive %s", str(drive))
+ def _teardownPoolImage(self, drive):
+ try:
+ res = self.irs.teardownImage(drive['domainID'],
+ drive['poolID'], drive['imageID'])
+ return res['status']['code']
+ except KeyError:
+ #This drive is not a vdsm image (quartet)
+ self.log.info("Avoiding tear down drive %s", str(drive))
+ return doneCode
- return res['status']['code']
+ def teardownVolumePath(self, drive):
+ if type(drive) == dict:
+ return self._teardownPoolImage(drive)
+ else:
+ # Other types don't require tear down
+ return 0
def createVm(self, vmParams):
try:
@@ -320,6 +351,7 @@
except vmContainer.VmContainerError as e:
if e.errno == errno.EEXIST:
return errCode['exist']
+
return
def waitForShutdown(self, timeout=None):
diff --git a/vdsm/libvirtvm.py b/vdsm/libvirtvm.py
index a530228..ea0d017 100644
--- a/vdsm/libvirtvm.py
+++ b/vdsm/libvirtvm.py
@@ -404,11 +404,8 @@
hooks.before_vm_hibernate(self._vm._dom.XMLDesc(0), self._vm.conf)
try:
self._vm._vmStats.pause()
- fname = self._vm.cif.prepareVolumePath(self._dst)
- try:
+ with self._vm.cif.preparedDrive(self._dst) as fname:
self._vm._dom.save(fname)
- finally:
- self._vm.cif.teardownVolumePath(self._dst)
except:
self._vm._vmStats.cont()
raise
@@ -1397,11 +1394,8 @@
elif 'restoreState' in self.conf:
hooks.before_vm_dehibernate(self.conf.pop('_srcDomXML'), self.conf)
- fname = self.cif.prepareVolumePath(self.conf['restoreState'])
- try:
+ with self.cif.preparedDrive(self.conf['restoreState']) as fname:
self._connection.restore(fname)
- finally:
- self.cif.teardownVolumePath(self.conf['restoreState'])
self._dom = NotifyingVirDomain(
self._connection.lookupByUUIDString(self.id),
diff --git a/vdsm/storage/devicemapper.py b/vdsm/storage/devicemapper.py
index a1651e0..388c1cd 100644
--- a/vdsm/storage/devicemapper.py
+++ b/vdsm/storage/devicemapper.py
@@ -46,6 +46,10 @@
(major, minor)))
+def getDevicePathByGuid(devGuid):
+ return DMPATH_FORMAT % devGuid
+
+
def getSysfsPath(devName):
if "/" in devName:
raise ValueError("devName has an illegal format. "
diff --git a/vdsm/vm.py b/vdsm/vm.py
index 3aa9f52..49193d3 100644
--- a/vdsm/vm.py
+++ b/vdsm/vm.py
@@ -210,12 +210,9 @@
if ignoreParam in self._machineParams:
del self._machineParams[ignoreParam]
- fname = self._vm.cif.prepareVolumePath(self._dstparams)
- try:
- with file(fname, "w") as f:
+ with self._vm.cif.preparedDrive(self._dstparams) as fname:
+ with file(fname, "wb") as f:
pickle.dump(self._machineParams, f)
- finally:
- self._vm.cif.teardownVolumePath(self._dstparams)
self._vm.setDownStatus(NORMAL, "SaveState succeeded")
self.status = {
--
To view, visit http://gerrit.ovirt.org/7755
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I57bb8684fd11a47843a158d13fcc2815147fa7ef
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Saggi Mizrahi <smizrahi(a)redhat.com>
10 years
Change in vdsm[master]: [wip] hsm: remove superfluous refreshes at startup
by Federico Simoncelli
Federico Simoncelli has uploaded a new change for review.
Change subject: [wip] hsm: remove superfluous refreshes at startup
......................................................................
[wip] hsm: remove superfluous refreshes at startup
During the startup it's not mandatory to refresh the iscsi connections
(the sdcache is already stale) and the lvm module can handle the lazy
initialization.
Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=870768
Change-Id: I8386d40c644c99a52f04b6b41b392abf16e3a2a6
Signed-off-by: Federico Simoncelli <fsimonce(a)redhat.com>
---
M vdsm/storage/hsm.py
1 file changed, 0 insertions(+), 3 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/76/9276/1
diff --git a/vdsm/storage/hsm.py b/vdsm/storage/hsm.py
index 46d1605..6a5040a 100644
--- a/vdsm/storage/hsm.py
+++ b/vdsm/storage/hsm.py
@@ -364,9 +364,6 @@
self.log.warn("Failed to clean Storage Repository.", exc_info=True)
def storageRefresh():
- lvm._lvminfo.bootstrap()
- sdCache.refreshStorage()
-
fileUtils.createdir(self.tasksDir)
# TBD: Should this be run in connectStoragePool? Should tasksDir
# exist under pool link as well (for hsm tasks)
--
To view, visit http://gerrit.ovirt.org/9276
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I8386d40c644c99a52f04b6b41b392abf16e3a2a6
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Federico Simoncelli <fsimonce(a)redhat.com>
10 years
Change in vdsm[master]: Remove unnecessary preparePaths.
by ewarszaw@redhat.com
Eduardo has uploaded a new change for review.
Change subject: Remove unnecessary preparePaths.
......................................................................
Remove unnecessary preparePaths.
Recovering running VMs, therefore the paths are already prepared.
Prepare paths is not locking the volumes anymore (Federico).
Change-Id: I35890d36227633ca147387d670c152b9be357e50
---
M vdsm/clientIF.py
1 file changed, 0 insertions(+), 16 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/86/786/1
--
To view, visit http://gerrit.ovirt.org/786
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I35890d36227633ca147387d670c152b9be357e50
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Eduardo <ewarszaw(a)redhat.com>
10 years
Change in vdsm[master]: Avoid template deactivation and lock.
by ewarszaw@redhat.com
Eduardo has uploaded a new change for review.
Change subject: Avoid template deactivation and lock.
......................................................................
Avoid template deactivation and lock.
Change-Id: Ieedf863ac967f34405f038201bac324c52fbbe89
---
M vdsm/storage/blockVolume.py
M vdsm/storage/volume.py
2 files changed, 39 insertions(+), 18 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/63/863/1
--
To view, visit http://gerrit.ovirt.org/863
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ieedf863ac967f34405f038201bac324c52fbbe89
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Eduardo <ewarszaw(a)redhat.com>
10 years
Change in vdsm[master]: [WIP] Add the validateImage command to the SPM
by Federico Simoncelli
Federico Simoncelli has uploaded a new change for review.
Change subject: [WIP] Add the validateImage command to the SPM
......................................................................
[WIP] Add the validateImage command to the SPM
Signed-off-by: Federico Simoncelli <fsimonce(a)redhat.com>
Change-Id: I095362e7d1eb91045569bd9526a102392e7adbe8
---
M vdsm/API.py
M vdsm/BindingXMLRPC.py
M vdsm/storage/hsm.py
M vdsm/storage/image.py
M vdsm/storage/sp.py
M vdsm/storage/volume.py
6 files changed, 60 insertions(+), 1 deletion(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/91/3491/1
--
To view, visit http://gerrit.ovirt.org/3491
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I095362e7d1eb91045569bd9526a102392e7adbe8
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Federico Simoncelli <fsimonce(a)redhat.com>
10 years
Change in vdsm[master]: image: copying a template is always allowed
by Federico Simoncelli
Federico Simoncelli has uploaded a new change for review.
Change subject: image: copying a template is always allowed
......................................................................
image: copying a template is always allowed
To copy a template from a domain to another is a safe action even if
it has images that are based on it. This patch relaxes the check during
moveImage if the operation is a copy.
Signed-off-by: Federico Simoncelli <fsimonce(a)redhat.com>
Change-Id: I9e07d569eec02ac4dcd386a7c576342f640ec242
---
M vdsm/storage/hsm.py
1 file changed, 19 insertions(+), 11 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/08/8408/1
diff --git a/vdsm/storage/hsm.py b/vdsm/storage/hsm.py
index 50d6ce1..730ee03 100644
--- a/vdsm/storage/hsm.py
+++ b/vdsm/storage/hsm.py
@@ -1288,32 +1288,39 @@
self._spmSchedule(spUUID, "deleteImage", lambda : True)
- def validateImageMove(self, srcDom, dstDom, imgUUID):
+ def validateImageCopy(self, srcDom, dstDom, imgUUID, safeToMove=False):
"""
- Determines if the image move is legal.
+ Determines if it is possible to copy (or move) the image.
- Moving an image based on a template to a data domain is only allowed if
- the template exists on the target domain.
- Moving a template from a data domain is only allowed if there are no
- images based on it in the source data domain.
+ Copying or moving an image based on a template to a data domain is
+ allowed only if the template already exists on the target domain.
+ When the option safeToMove (default: False) is active (True) an
+ extra check makes sure that if the image is a template (on a data
+ domain) then there are no other images based on it and therefore it
+ is safe to remove it.
"""
srcAllVols = srcDom.getAllVolumes()
dstAllVols = dstDom.getAllVolumes()
# Filter volumes related to this image
srcVolsImgs = sd.getVolsOfImage(srcAllVols, imgUUID)
+
# Find the template
for volName, imgsPar in srcVolsImgs.iteritems():
if len(imgsPar.imgs) > 1:
# This is the template. Should be only one.
tName, tImgs = volName, imgsPar.imgs
+
# Template self image is the 1st entry
if imgUUID != tImgs[0] and tName not in dstAllVols.keys():
- self.log.error("img %s can't be moved to dom %s because "
- "template %s is absent on it", imgUUID, dstDom.sdUUID, tName)
+ self.log.error("Cannot move image %s to domain %s "
+ "because the template %s is missing on the "
+ "destination", imgUUID, dstDom.sdUUID, tName)
raise se.ImageDoesNotExistInSD(imgUUID, dstDom.sdUUID)
- elif imgUUID == tImgs[0] and not srcDom.isBackup():
+ elif (safeToMove and imgUUID == tImgs[0]
+ and not srcDom.isBackup()):
raise se.MoveTemplateImageError(imgUUID)
+
break
return True
@@ -1333,7 +1340,8 @@
srcDom = self.validateSdUUID(srcDomUUID)
dstDom = self.validateSdUUID(dstDomUUID)
pool = self.getPool(spUUID) #Validates that the pool is connected. WHY?
- self.validateImageMove(srcDom, dstDom, imgUUID)
+ self.validateImageCopy(srcDom, dstDom, imgUUID,
+ safeToMove=(op != image.COPY_OP))
domains = [srcDomUUID, dstDomUUID]
domains.sort()
@@ -1364,7 +1372,7 @@
images = {}
for (imgUUID, pZero) in imgDict.iteritems():
images[imgUUID.strip()] = misc.parseBool(pZero)
- self.validateImageMove(srcDom, dstDom, imgUUID)
+ self.validateImageCopy(srcDom, dstDom, imgUUID, safeToMove=True)
domains = sorted([srcDomUUID, dstDomUUID])
for dom in domains:
--
To view, visit http://gerrit.ovirt.org/8408
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I9e07d569eec02ac4dcd386a7c576342f640ec242
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Federico Simoncelli <fsimonce(a)redhat.com>
10 years
Change in vdsm[master]: [WIP] Simplifying hsm.copyImage logic.
by ewarszaw@redhat.com
Eduardo has uploaded a new change for review.
Change subject: [WIP] Simplifying hsm.copyImage logic.
......................................................................
[WIP] Simplifying hsm.copyImage logic.
Consider further optimization moving the
*.validateCreateVolumeParams() to the domain level.
Change-Id: I5db9053dabb97423611634e2bdfbdd09ec02876b
Signed-off-by: Eduardo <ewarszaw(a)redhat.com>
---
M vdsm/storage/hsm.py
M vdsm/storage/sd.py
2 files changed, 14 insertions(+), 27 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/09/8509/1
diff --git a/vdsm/storage/hsm.py b/vdsm/storage/hsm.py
index 2a8bc59..2c031f8 100644
--- a/vdsm/storage/hsm.py
+++ b/vdsm/storage/hsm.py
@@ -1386,35 +1386,29 @@
Create new template/volume from VM.
Do it by collapse and copy the whole chain (baseVolUUID->srcVolUUID)
"""
- argsStr = "sdUUID=%s, spUUID=%s, vmUUID=%s, srcImgUUID=%s, srcVolUUID=%s, dstImgUUID=%s, "\
- "dstVolUUID=%s, description=%s, dstSdUUID=%s, volType=%s, volFormat=%s, "\
- "preallocate=%s force=%s, postZero=%s" % (sdUUID, spUUID, vmUUID,
- srcImgUUID, srcVolUUID, dstImgUUID, dstVolUUID, description,
- dstSdUUID, volType, volFormat, preallocate, force, postZero)
+ argsStr = str(locals())
vars.task.setDefaultException(se.TemplateCreationError("%s" % argsStr))
+ if dstSdUUID == sd.BLANK_UUID:
+ dstSdUUID = sdUUID
+
+ if dstSdUUID != sdUUID:
+ domains = (sdUUID, dstSdUUID)
# Validate imgUUID in case of copy inside source domain itself
- if dstSdUUID in (sdUUID, sd.BLANK_UUID):
- if srcImgUUID == dstImgUUID:
+ elif srcImgUUID == dstImgUUID:
raise se.InvalidParameterException("dstImgUUID", dstImgUUID)
+ else:
+ domains = (sdUUID)
+
pool = self.getPool(spUUID)
- self.validateSdUUID(sdUUID)
+ for dom in domains:
+ self.validateSdUUID(domains)
# Avoid VM copy if one of its volume (including template if exists) ILLEGAL/FAKE
pool.validateVolumeChain(sdUUID, srcImgUUID)
# Validate volume type and format
- if dstSdUUID != sd.BLANK_UUID:
- dom = dstSdUUID
- else:
- dom = sdUUID
- sdCache.produce(dom).validateCreateVolumeParams(volFormat, preallocate, volume.BLANK_UUID)
+ sdCache.produce(dstSdUUID).validateCreateVolumeParams(volFormat, preallocate, volume.BLANK_UUID)
- # If dstSdUUID defined, means we copy image to it
- domains = [sdUUID]
- if dstSdUUID not in [sdUUID, sd.BLANK_UUID]:
- self.validateSdUUID(dstSdUUID)
- domains.append(dstSdUUID)
- domains.sort()
-
+ domains.sort()
for dom in domains:
vars.task.getSharedLock(STORAGE, dom)
diff --git a/vdsm/storage/sd.py b/vdsm/storage/sd.py
index fcb796c..9c7fd52 100644
--- a/vdsm/storage/sd.py
+++ b/vdsm/storage/sd.py
@@ -369,13 +369,6 @@
"""
pass
- @classmethod
- def validateCreateVolumeParams(cls, volFormat, preallocate, srcVolUUID):
- """
- Validate create volume parameters
- """
- pass
-
def createVolume(self, imgUUID, size, volFormat, preallocate, diskType,
volUUID, desc, srcImgUUID, srcVolUUID):
"""
--
To view, visit http://gerrit.ovirt.org/8509
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I5db9053dabb97423611634e2bdfbdd09ec02876b
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Eduardo <ewarszaw(a)redhat.com>
10 years
Change in vdsm[master]: vm: refresh all volumes after migration
by Federico Simoncelli
Federico Simoncelli has uploaded a new change for review.
Change subject: vm: refresh all volumes after migration
......................................................................
vm: refresh all volumes after migration
After the vm live migration is completed the volumes must be refreshed
to comply with a possible volume extension happened after the migration
started.
Change-Id: Ibe0bd784e68f162b7902bb6ae50ff183d9f8fa8a
Signed-off-by: Federico Simoncelli <fsimonce(a)redhat.com>
---
M vdsm/libvirtvm.py
M vdsm/vm.py
2 files changed, 32 insertions(+), 7 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/18/8518/1
diff --git a/vdsm/libvirtvm.py b/vdsm/libvirtvm.py
index 9f865de..729a288 100644
--- a/vdsm/libvirtvm.py
+++ b/vdsm/libvirtvm.py
@@ -1634,6 +1634,23 @@
else:
self._monitorResponse = 0
+ def refreshAllDrives(self):
+ volInfoList = []
+
+ for device in self._devices[vm.DISK_DEVICES][:]:
+ if not device.blockDev or not hasattr(device, 'domainID'):
+ continue
+
+ volInfoList.append({
+ 'domainID': device.domainID,
+ 'poolID': device.poolID,
+ 'imageID': device.imageID,
+ 'volumeID': device.volumeID,
+ })
+
+ if volInfoList:
+ self.refreshVolumes(volInfoList)
+
def _waitForIncomingMigrationFinish(self):
if 'restoreState' in self.conf:
self.cont()
@@ -1667,8 +1684,11 @@
del self.conf['guestIPs']
if 'username' in self.conf:
del self.conf['username']
+
self.saveState()
- self.log.debug("End of migration")
+
+ self.log.debug("Migration ended, refreshing all the drive volumes")
+ self.refreshAllDrives()
def _underlyingCont(self):
hooks.before_vm_cont(self._dom.XMLDesc(0), self.conf)
diff --git a/vdsm/vm.py b/vdsm/vm.py
index 5ea2859..a3c6839 100644
--- a/vdsm/vm.py
+++ b/vdsm/vm.py
@@ -742,16 +742,21 @@
else:
self.__extendDriveVolume(vmDrive, newSize)
- def __refreshDriveVolume(self, volInfo):
+ def refreshVolumes(self, volInfoList):
""" Stop vm before refreshing LV. """
-
self._guestCpuLock.acquire()
try:
wasRunning = self._guestCpuRunning
+
if wasRunning:
self.pause(guestCpuLocked=True)
- self.cif.irs.refreshVolume(volInfo['domainID'],
- volInfo['poolID'], volInfo['imageID'], volInfo['volumeID'])
+
+ for volInfo in volInfoList:
+ self.cif.irs.refreshVolume(
+ volInfo['domainID'], volInfo['poolID'],
+ volInfo['imageID'], volInfo['volumeID']
+ )
+
if wasRunning:
self.cont(guestCpuLocked=True)
finally:
@@ -759,7 +764,7 @@
def __afterReplicaExtend(self, volInfo):
self.log.debug("Refreshing replica volume: %s", volInfo)
- self.__refreshDriveVolume(volInfo)
+ self.refreshVolumes((volInfo,))
vmDrive = self._findDriveByName(volInfo['name'])
@@ -778,7 +783,7 @@
def __afterDriveExtend(self, volInfo):
self.log.debug("Refreshing drive volume: %s", volInfo)
- self.__refreshDriveVolume(volInfo)
+ self.refreshVolumes((volInfo,))
vmDrive = self._findDriveByName(volInfo['name'])
res = self.cif.irs.getVolumeSize(vmDrive.domainID, vmDrive.poolID,
--
To view, visit http://gerrit.ovirt.org/8518
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ibe0bd784e68f162b7902bb6ae50ff183d9f8fa8a
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Federico Simoncelli <fsimonce(a)redhat.com>
10 years