Change in vdsm[master]: gluster: geo-replication configuration related verbs.
by dnarayan@redhat.com
Darshan N has uploaded a new change for review.
Change subject: gluster: geo-replication configuration related verbs.
......................................................................
gluster: geo-replication configuration related verbs.
This patch adds four verbs related to geo-replication
configuration.
Verbs:
* glusterVolumeGeoRepConfigList
- Lists all the configuration for the session.
Returns:
{georepConfig:[{'configName': configuration name,
'description': value}]}
* glusterVolumeGeoRepConfigGet
- Gives the configuration value for a specified
configuration.
Returns:
{georepConfig:{'configName': configuration name,
'description': value}}
* glusterVolumeGeoRepConfigSet
- Sets the configuration value for a given
configuration.
Returns:bool
* glusterVolumeGeoRepConfigSetDefault
- Sets the configuration to its default value
Returns:bool
Change-Id: Ifee571dbf3c56633e415d32f8d518cb64aaf1add
Signed-off-by: ndarshan <dnarayan(a)redhat.com>
---
M client/vdsClientGluster.py
M vdsm/gluster/api.py
M vdsm/gluster/cli.py
M vdsm/gluster/exception.py
M vdsm/gluster/vdsmapi-gluster-schema.json
5 files changed, 354 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/62/30162/1
diff --git a/client/vdsClientGluster.py b/client/vdsClientGluster.py
index 421c853..4c65707 100644
--- a/client/vdsClientGluster.py
+++ b/client/vdsClientGluster.py
@@ -430,6 +430,70 @@
pp.pprint(status)
return status['status']['code'], status['status']['message']
+ def do_glusterVolumeGeoRepConfigList(self, args):
+ params = self._eqSplit(args)
+ masterVolName = params.get('volumeName', '')
+ slaveHost = params.get('slaveHost', '')
+ slaveVolName = params.get('slaveVolName', '')
+ if not(masterVolName and slaveHost and slaveVolName):
+ raise ValueError
+
+ status = self.s.glusterVolumeGeoRepConfigList(masterVolName,
+ slaveHost,
+ slaveVolName)
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeGeoRepConfigSet(self, args):
+ params = self._eqSplit(args)
+ masterVolName = params.get('volumeName', '')
+ slaveHost = params.get('slaveHost', '')
+ slaveVolName = params.get('slaveVolName', '')
+ key = params.get('key', '')
+ value = params.get('value', '')
+ if not(masterVolName and slaveHost and slaveVolName and key and value):
+ raise ValueError
+
+ status = self.s.glusterVolumeGeoRepConfigSet(masterVolName,
+ slaveHost,
+ slaveVolName,
+ key,
+ value)
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeGeoRepConfigGet(self, args):
+ params = self._eqSplit(args)
+ masterVolName = params.get('volumeName', '')
+ slaveHost = params.get('slaveHost', '')
+ slaveVolName = params.get('slaveVolName', '')
+ key = params.get('key', '')
+ if not(masterVolName and slaveHost and slaveVolName and key):
+ raise ValueError
+
+ status = self.s.glusterVolumeGeoRepConfigGet(masterVolName,
+ slaveHost,
+ slaveVolName,
+ key)
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeGeoRepConfigSetDefault(self, args):
+ params = self._eqSplit(args)
+ masterVolName = params.get('volumeName', '')
+ slaveHost = params.get('slaveHost', '')
+ slaveVolName = params.get('slaveVolName', '')
+ key = params.get('key', '')
+ if not(masterVolName and slaveHost and slaveVolName and key):
+ raise ValueError
+
+ status = self.s.glusterVolumeGeoRepConfigSetDefault(masterVolName,
+ slaveHost,
+ slaveVolName,
+ key)
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
def getGlusterCmdDict(serv):
return \
@@ -731,4 +795,55 @@
('volumeName=<volume name>',
'Returns total, free and used space(bytes) of gluster volume'
)),
+ 'glusterVolumeGeoRepConfigList': (
+ serv.do_glusterVolumeGeoRepConfigList,
+ ('volumeName=<master_volume_name> slaveHost=<slave_host> '
+ 'slaveVolName=<slave_volume_name>\n\t'
+ '<master_volume_name> is an existing volume name in '
+ 'the master node\n\t'
+ '<slave_host> is slave host name\n\t'
+ '<slave_volume_name> is an existing volume name '
+ 'in the slave node',
+ 'list volume geo-replication configurations'
+ )),
+ 'glusterVolumeGeoRepConfigSet': (
+ serv.do_glusterVolumeGeoRepConfigSet,
+ ('volumeName=<master_volume_name> slaveHost=<slave_host> '
+ 'slaveVolName=<slave_volume_name> keyName=<key> '
+ 'value=<value>\n\t'
+ '<master_volume_name> is an existing volume name '
+ 'in the master node\n\t'
+ '<slave_host> is slave host name\n\t'
+ '<slave_volume_name> is an existing volume name in '
+ 'the slave node\n\t'
+ '<key> is the key name of configuration\n\t'
+ '<value> is the key value',
+ 'set volume geo-replication configuration'
+ )),
+ 'glusterVolumeGeoRepConfigGet': (
+ serv.do_glusterVolumeGeoRepConfigGet,
+ ('volumeName=<master_volume_name> slaveHost=<slave_host> '
+ 'slaveVolName=<slave_volume_name> keyName=<key> '
+ 'value=<value>\n\t'
+ '<master_volume_name> is an existing volume name in '
+ 'the master node\n\t'
+ '<slave_host> is slave host name\n\t'
+ '<slave_volume_name> is an existing volume name in '
+ 'the slave node\n\t'
+ '<key> is the key name of configuration',
+ 'get volume geo-replication configuration'
+ )),
+ 'glusterVolumeGeoRepConfigSetDefault': (
+ serv.do_glusterVolumeGeoRepConfigSetDefault,
+ ('masterVolName=<master_volume_name> slaveHost=<slave_host> '
+ 'slaveVolName=<slave_volume_name> keyName=<key> '
+ 'value=<value>\n\t'
+ '<master_volume_name> is an existing volume name in '
+ 'the master node\n\t'
+ '<slave_host> is slave host name\n\t'
+ '<slave_volume_name> is an existing volume name in '
+ 'the slave node\n\t'
+ '<key> is the key name',
+ 'Set volume geo-replication configuration to default'
+ ))
}
diff --git a/vdsm/gluster/api.py b/vdsm/gluster/api.py
index 0e6c850..5b3eec7 100644
--- a/vdsm/gluster/api.py
+++ b/vdsm/gluster/api.py
@@ -322,6 +322,46 @@
data = self.svdsmProxy.glusterVolumeStatvfs(volumeName)
return self._computeVolumeStats(data)
+ @exportAsVerb
+ def volumeGeoRepConfigList(self, masterVolName, slaveHost, slaveVolName,
+ options=None):
+ status = self.svdsmProxy.glusterVolumeGeoRepConfigList(masterVolName,
+ slaveHost,
+ slaveVolName)
+ return {'geoRepConfig': status}
+
+ @exportAsVerb
+ def volumeGeoRepConfigSet(self, masterVolName, slaveHost, slaveVolName,
+ key, value, options=None):
+ status = self.svdsmProxy.glusterVolumeGeoRepConfigSet(masterVolName,
+ slaveHost,
+ slaveVolName,
+ key,
+ value)
+ return {'geoRepSet': status}
+
+ @exportAsVerb
+ def volumeGeoRepConfigGet(self, masterVolName, slaveHost, slaveVolName,
+ key, options=None):
+
+ status = self.svdsmProxy.glusterVolumeGeoRepConfigGet(masterVolName,
+ slaveHost,
+ slaveVolName,
+ key)
+ return {'geoRepGet': status}
+
+ @exportAsVerb
+ def volumeGeoRepConfigSetDefault(self, masterVolName, slaveHost,
+ slaveVolName,
+ key, options=None):
+
+ status = self.svdsmProxy.glusterVolumeGeoRepConfigSetDefault(
+ masterVolName,
+ slaveHost,
+ slaveVolName,
+ key)
+ return {'geoRepSetDefault': status}
+
def getGlusterMethods(gluster):
l = []
diff --git a/vdsm/gluster/cli.py b/vdsm/gluster/cli.py
index 2e1c9a9..6a4ffd6 100644
--- a/vdsm/gluster/cli.py
+++ b/vdsm/gluster/cli.py
@@ -1054,3 +1054,74 @@
return _parseVolumeTasks(xmltree)
except _etreeExceptions:
raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
+
+
+def _parseVolumeGeoRepConfigList(tree):
+ """
+ Returns:
+ {geoRepConfig:[{'option name': name of the configuration,
+ 'description': value of the configuration}]
+ }
+ """
+ result = {}
+ conf = tree.find('geoRep/config')
+ config_list = []
+ for child in conf.getchildren():
+ config = {}
+ config['optionName'] = child.tag
+ config['description'] = child.text
+ config_list.append(config)
+ result['geoRepConfig'] = config_list
+ return result
+
+
+@makePublic
+def volumeGeoRepConfigList(masterVolName, slaveHost, slaveVolName):
+ command = _getGlusterVolCmd() + ["geo-replication", masterVolName,
+ "%s::%s" % (slaveHost, slaveVolName),
+ "config"]
+ try:
+ xmltree = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException as e:
+ raise ge.GlusterGeoRepConfigListFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseVolumeGeoRepConfigList(xmltree)
+ except _etreeExceptions:
+ raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
+
+
+@makePublic
+def volumeGeoRepConfigSet(masterVolName, slaveHost, slaveVolName, key, value):
+ command = _getGlusterVolCmd() + ["geo-replication", masterVolName,
+ "%s::%s" % (slaveHost, slaveVolName),
+ "config", key, value]
+ rc, out, err = _execGluster(command)
+ if rc:
+ raise ge.GlusterGeoRepConfigSetFailedException(rc, out, err)
+ return True
+
+
+@makePublic
+def volumeGeoRepConfigGet(masterVolName, slaveHost, slaveVolName, key):
+ command = _getGlusterVolCmd() + ["geo-replication", masterVolName,
+ "%s::%s" % (slaveHost, slaveVolName),
+ "config", key]
+ try:
+ xmltree = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException as e:
+ raise ge.GlusterGeoRepConfigGetFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseVolumeGeoRepConfigList(xmltree)
+ except _etreeExceptions:
+ raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
+
+
+@makePublic
+def volumeGeoRepConfigSetDefault(masterVolName, slaveHost, slaveVolName, key):
+ command = _getGlusterVolCmd() + ["geo-replication", masterVolName,
+ "%s::%s" % (slaveHost, slaveVolName),
+ "config", "!%s" % key]
+ rc, out, err = _execGluster(command)
+ if rc:
+ raise ge.GlusterGeoRepConfigSetDefaultFailedException(rc, out, err)
+ return True
diff --git a/vdsm/gluster/exception.py b/vdsm/gluster/exception.py
index 0205cb1..ca6b7cf 100644
--- a/vdsm/gluster/exception.py
+++ b/vdsm/gluster/exception.py
@@ -506,3 +506,29 @@
class GlfsFiniException(GlusterLibgfapiException):
code = 4573
message = "glfs fini failed"
+
+
+#geo-replication
+class GlusterGeoRepException(GlusterException):
+ code = 4560
+ message = "Gluster Geo-Replication Exception"
+
+
+class GlusterGeoRepConfigListFailedException(GlusterVolumeException):
+ code = 4169
+ message = "Get volume geo-replication config list failed"
+
+
+class GlusterGeoRepConfigSetFailedException(GlusterVolumeException):
+ code = 4170
+ message = "Set volume geo-replication config failed"
+
+
+class GlusterGeoRepConfigGetFailedException(GlusterVolumeException):
+ code = 4171
+ message = "Get volume geo-replication config failed"
+
+
+class GlusterGeoRepConfigSetDefaultFailedException(GlusterVolumeException):
+ code = 4172
+ message = "Set volume geo-replication config to default failed"
diff --git a/vdsm/gluster/vdsmapi-gluster-schema.json b/vdsm/gluster/vdsmapi-gluster-schema.json
index 4ddd182..06de6ca 100644
--- a/vdsm/gluster/vdsmapi-gluster-schema.json
+++ b/vdsm/gluster/vdsmapi-gluster-schema.json
@@ -1238,3 +1238,105 @@
{'command': {'class': 'GlusterVolume', 'name': 'statsInfoGet'},
'data': {'volumeName': 'str'},
'returns': 'GlusterVolumeStatsInfo'}
+
+# @GeoRepConfig:
+#
+# Geo replication config details.
+#
+# @optionname: Config option name
+#
+# @description: Option details
+#
+# Since: 4.16.0
+##
+{'type': 'GeoRepConfig',
+ 'data': {'optionname': 'str', 'description': 'str'}}
+
+##
+# @GlusterGeoRep.configList:
+#
+# List Geo Replication configuration
+#
+# @mastervolname: is an existing volume name in the master node
+#
+# @slavehost: is remote slave host name or ip
+#
+# @slavevolname: is an available existing volume name in the slave node
+#
+# Returns:
+# List of geo replication configurations
+#
+# Since: 4.16.0
+##
+{'command': {'class': 'GlusterGeoRep', 'name': 'geoRepConfigList'},
+ 'data': {'mastervolname': 'str', 'slavehost': 'str', 'slavevolname': 'str'},
+ 'returns': 'GeoRepConfig'}
+
+##
+# @GlusterGeoRep.configSet:
+#
+# Set Geo Replication config option
+#
+# @mastervolname: is an existing volume name in the master node
+#
+# @slavehost: is remote slave host name or ip
+#
+# @slavevolname: is an available existing volume name in the slave node
+#
+# @key: valid configuration option name
+#
+# @value: value to the option
+#
+# Returns:
+# True if it sets value to the option successfully
+#
+# Since: 4.16.0
+##
+{'command': {'class': 'GlusterGeoRep', 'name': 'geoRepConfigSet'},
+ 'data': {'mastervolname': 'str', 'slavehost': 'str', 'slavevolname': 'str', 'key': 'str', 'value': 'str'},
+ 'returns': 'bool'}
+
+##
+# @GlusterVolume.geoRepConfigGet:
+#
+# Get value of the Geo Replication config option
+#
+# @mastervolname: is an existing volume name in the master node
+#
+# @slavehost: is remote slave host name or ip
+#
+# @slavevolname: is an available existing volume name in the slave node
+#
+# @key: valid configuration option name
+#
+# Returns:
+# The value of the Geo Replication config option
+#
+# Since: 4.16.0
+##
+{'command': {'class': 'GlusterGeoRep', 'name': 'geoRepConfigGet'},
+ 'data': {'mastervolname': 'str', 'slavehost': 'str', 'slavevolname': 'str', 'key': 'str'},
+ 'returns': 'GeoRepConfig'}
+
+##
+# @GlusterVolume.geoRepConfigSetDefault:
+#
+# Resets the config option to default value
+#
+# @mastervolname: is an existing volume name in the master node
+#
+# @slavehost: is remote slave host name or ip
+#
+# @slavevolname: is an available existing volume name in the slave node
+#
+# @key: valid configuration option name
+#
+# Returns:
+# True if default value is set successfully
+#
+# Since: 4.16.0
+##
+{'command': {'class': 'GlusterGeoRep', 'name': 'geoRepConfigSetDefault'},
+ 'data': {'mastervolname': 'str', 'slavehost': 'str', 'slavevolname': 'str', 'key': 'str'},
+ 'returns': 'bool'}
+
--
To view, visit http://gerrit.ovirt.org/30162
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ifee571dbf3c56633e415d32f8d518cb64aaf1add
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Darshan N <dnarayan(a)redhat.com>
9 years, 3 months
Change in vdsm[master]: storage.monitor: Streamline names
by Nir Soffer
Nir Soffer has uploaded a new change for review.
Change subject: storage.monitor: Streamline names
......................................................................
storage.monitor: Streamline names
Rename internal classes using shorter names, considering the package
namespace. For example:
storage.monitor.DomainMonitorStatus -> storage.monitor.Status
In this case the name is more correct, as this is not the status of the
single DomainMonitor object but status of one of the monitor threads.
Since DomainMonitorThread was renamed, the logger was renamed too. We do
not need multiple loggers for the DomainMonitor and the MonitorThread, so
both of them use now the Storage.Monitor logger.
Change-Id: I8eae2f8548ebd1b336df10aea7dde47a4aeabf70
Signed-off-by: Nir Soffer <nsoffer(a)redhat.com>
---
M tests/storageMonitorTests.py
M vdsm/storage/monitor.py
2 files changed, 10 insertions(+), 10 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/07/32507/1
diff --git a/tests/storageMonitorTests.py b/tests/storageMonitorTests.py
index 112d739..bbd152f 100644
--- a/tests/storageMonitorTests.py
+++ b/tests/storageMonitorTests.py
@@ -25,7 +25,7 @@
class FrozenStatusTests(VdsmTestCase):
def setUp(self):
- self.status = monitor.DomainMonitorStatus()
+ self.status = monitor.Status()
self.frozen = monitor.FrozenStatus(self.status)
def test_copy_attributes(self):
diff --git a/vdsm/storage/monitor.py b/vdsm/storage/monitor.py
index c4f5419..09f8084 100644
--- a/vdsm/storage/monitor.py
+++ b/vdsm/storage/monitor.py
@@ -31,7 +31,7 @@
from .sdc import sdCache
-class DomainMonitorStatus(object):
+class Status(object):
__slots__ = (
"error", "checkTime", "valid", "readDelay", "masterMounted",
"masterValid", "diskUtilization", "vgMdUtilization",
@@ -62,7 +62,7 @@
self.version = -1
-class FrozenStatus(DomainMonitorStatus):
+class FrozenStatus(Status):
def __init__(self, other):
for name in other.__slots__:
@@ -76,7 +76,7 @@
class DomainMonitor(object):
- log = logging.getLogger('Storage.DomainMonitor')
+ log = logging.getLogger('Storage.Monitor')
def __init__(self, interval):
self._monitors = {}
@@ -101,8 +101,8 @@
return
self.log.info("Start monitoring %s", sdUUID)
- monitor = DomainMonitorThread(weakref.proxy(self),
- sdUUID, hostId, self._interval)
+ monitor = MonitorThread(weakref.proxy(self), sdUUID, hostId,
+ self._interval)
monitor.poolDomain = poolDomain
monitor.start()
# The domain should be added only after it succesfully started
@@ -158,8 +158,8 @@
monitor.sdUUID)
-class DomainMonitorThread(object):
- log = logging.getLogger('Storage.DomainMonitorThread')
+class MonitorThread(object):
+ log = logging.getLogger('Storage.Monitor')
def __init__(self, domainMonitor, sdUUID, hostId, interval):
self.thread = threading.Thread(target=self._run)
@@ -171,7 +171,7 @@
self.hostId = hostId
self.interval = interval
self.firstChange = True
- self.nextStatus = DomainMonitorStatus()
+ self.nextStatus = Status()
self.status = FrozenStatus(self.nextStatus)
self.isIsoDomain = None
self.isoPrefix = None
@@ -223,7 +223,7 @@
self.stopEvent.wait(self.interval)
def _monitorDomain(self):
- self.nextStatus = DomainMonitorStatus()
+ self.nextStatus = Status()
# Pick up changes in the domain, for example, domain upgrade.
if self._shouldRefreshDomain():
--
To view, visit http://gerrit.ovirt.org/32507
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I8eae2f8548ebd1b336df10aea7dde47a4aeabf70
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Nir Soffer <nsoffer(a)redhat.com>
9 years, 3 months
Change in vdsm[master]: storage.monitor: Use lowercase module name
by Nir Soffer
Nir Soffer has uploaded a new change for review.
Change subject: storage.monitor: Use lowercase module name
......................................................................
storage.monitor: Use lowercase module name
storage.domainMonitor was using a pep8-incompatible and overly specific
module name.
Change-Id: Iae1686411a7cfa72580cb6e97bbf54362335dab3
Signed-off-by: Nir Soffer <nsoffer(a)redhat.com>
---
M debian/vdsm.install
M tests/Makefile.am
R tests/storageMonitorTests.py
M vdsm.spec.in
M vdsm/storage/Makefile.am
M vdsm/storage/hsm.py
R vdsm/storage/monitor.py
7 files changed, 9 insertions(+), 9 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/06/32506/1
diff --git a/debian/vdsm.install b/debian/vdsm.install
index b1840ec..78d75de 100644
--- a/debian/vdsm.install
+++ b/debian/vdsm.install
@@ -94,7 +94,7 @@
./usr/share/vdsm/storage/curlImgWrap.py
./usr/share/vdsm/storage/devicemapper.py
./usr/share/vdsm/storage/dispatcher.py
-./usr/share/vdsm/storage/domainMonitor.py
+./usr/share/vdsm/storage/monitor.py
./usr/share/vdsm/storage/fileSD.py
./usr/share/vdsm/storage/fileUtils.py
./usr/share/vdsm/storage/fileVolume.py
diff --git a/tests/Makefile.am b/tests/Makefile.am
index 3a0bbd5..e78846c 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -30,7 +30,6 @@
capsTests.py \
clientifTests.py \
configNetworkTests.py \
- domainMonitorTests.py \
fileVolumeTests.py \
fileUtilTests.py \
fuserTests.py \
@@ -71,6 +70,7 @@
sslhelper.py \
sslTests.py \
storageMailboxTests.py \
+ storageMonitorTests.py \
tcTests.py \
testlibTests.py \
toolTests.py \
diff --git a/tests/domainMonitorTests.py b/tests/storageMonitorTests.py
similarity index 90%
rename from tests/domainMonitorTests.py
rename to tests/storageMonitorTests.py
index b4b8d39..112d739 100644
--- a/tests/domainMonitorTests.py
+++ b/tests/storageMonitorTests.py
@@ -18,15 +18,15 @@
# Refer to the README and COPYING files for full details of the license
#
-from storage import domainMonitor
+from storage import monitor
from testlib import VdsmTestCase
class FrozenStatusTests(VdsmTestCase):
def setUp(self):
- self.status = domainMonitor.DomainMonitorStatus()
- self.frozen = domainMonitor.FrozenStatus(self.status)
+ self.status = monitor.DomainMonitorStatus()
+ self.frozen = monitor.FrozenStatus(self.status)
def test_copy_attributes(self):
for name in self.status.__slots__:
diff --git a/vdsm.spec.in b/vdsm.spec.in
index 323b586..e6e3bfa 100644
--- a/vdsm.spec.in
+++ b/vdsm.spec.in
@@ -1015,7 +1015,7 @@
%{_datadir}/%{vdsm_name}/storage/curlImgWrap.py*
%{_datadir}/%{vdsm_name}/storage/devicemapper.py*
%{_datadir}/%{vdsm_name}/storage/dispatcher.py*
-%{_datadir}/%{vdsm_name}/storage/domainMonitor.py*
+%{_datadir}/%{vdsm_name}/storage/monitor.py*
%{_datadir}/%{vdsm_name}/storage/fileSD.py*
%{_datadir}/%{vdsm_name}/storage/fileUtils.py*
%{_datadir}/%{vdsm_name}/storage/fileVolume.py*
diff --git a/vdsm/storage/Makefile.am b/vdsm/storage/Makefile.am
index 99b1460..fcbbb61 100644
--- a/vdsm/storage/Makefile.am
+++ b/vdsm/storage/Makefile.am
@@ -31,7 +31,6 @@
curlImgWrap.py \
devicemapper.py \
dispatcher.py \
- domainMonitor.py \
fileSD.py \
fileUtils.py \
fileVolume.py \
@@ -47,6 +46,7 @@
localFsSD.py \
lvm.py \
misc.py \
+ monitor.py \
mount.py \
multipath.py \
nfsSD.py \
diff --git a/vdsm/storage/hsm.py b/vdsm/storage/hsm.py
index 722a858..7d009c6 100644
--- a/vdsm/storage/hsm.py
+++ b/vdsm/storage/hsm.py
@@ -43,7 +43,7 @@
from spbackends import MAX_POOL_DESCRIPTION_SIZE, MAX_DOMAINS
from spbackends import StoragePoolDiskBackend
from spbackends import StoragePoolMemoryBackend
-import domainMonitor
+import monitor
import sd
import blockSD
import nfsSD
@@ -392,7 +392,7 @@
storageRefreshThread.start()
monitorInterval = config.getint('irs', 'sd_health_check_delay')
- self.domainMonitor = domainMonitor.DomainMonitor(monitorInterval)
+ self.domainMonitor = monitor.DomainMonitor(monitorInterval)
@property
def ready(self):
diff --git a/vdsm/storage/domainMonitor.py b/vdsm/storage/monitor.py
similarity index 100%
rename from vdsm/storage/domainMonitor.py
rename to vdsm/storage/monitor.py
--
To view, visit http://gerrit.ovirt.org/32506
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Iae1686411a7cfa72580cb6e97bbf54362335dab3
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Nir Soffer <nsoffer(a)redhat.com>
9 years, 3 months
Change in vdsm[master]: assert: Replace assertion with AssertionError
by Nir Soffer
Nir Soffer has uploaded a new change for review.
Change subject: assert: Replace assertion with AssertionError
......................................................................
assert: Replace assertion with AssertionError
If Python is run with -O flag, assertions are removed. This may cause a
useful error message to be hidden, making it harder to debug, or worse,
code invoke inside an assert will not run, changing the flow of the
"optimized" code.
This patch replaces assertions with raising AssertionError, ensuring
that when the impossible happens, we will get an exception.
This patch, together with http://gerrit.ovirt.org/29302, replaces all the
asserts in vdsm code. The rest of the asserts are in tests, where they are
safe and useful.
Change-Id: I8c60c65b283f4343448bb9eaf6ccf2526b188db0
Signed-off-by: Nir Soffer <nsoffer(a)redhat.com>
---
M lib/vdsm/libvirtconnection.py
M lib/vdsm/netinfo.py
M vdsm/rpc/BindingXMLRPC.py
M vdsm/rpc/process-schema.py
M vdsm/storage/misc.py
5 files changed, 25 insertions(+), 10 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/07/29307/1
diff --git a/lib/vdsm/libvirtconnection.py b/lib/vdsm/libvirtconnection.py
index b1332db..260de23 100644
--- a/lib/vdsm/libvirtconnection.py
+++ b/lib/vdsm/libvirtconnection.py
@@ -37,7 +37,8 @@
self.__thread = None
def start(self):
- assert not self.run
+ if self.run:
+ raise AssertionError("EventLoop is running")
self.__thread = threading.Thread(target=self.__run,
name="libvirtEventLoop")
self.__thread.setDaemon(True)
diff --git a/lib/vdsm/netinfo.py b/lib/vdsm/netinfo.py
index 8d4dc19..78055b4 100644
--- a/lib/vdsm/netinfo.py
+++ b/lib/vdsm/netinfo.py
@@ -986,8 +986,9 @@
def getBondingForNic(self, nic):
bondings = list(self.getBondingsForNic(nic))
if bondings:
- assert len(bondings) == 1, \
- "Unexpected configuration: More than one bonding per nic"
+ if len(bondings) != 1:
+ raise AssertionError("Unexpected configuration: More than "
+ "one bonding per nic")
return bondings[0]
return None
@@ -1005,13 +1006,19 @@
for port in ports:
if port in self.vlans:
- assert vlan is None
+ if vlan is not None:
+ raise AssertionError("Unexpected vlan: %s exepected: None"
+ % (vlan,))
nic = getVlanDevice(port)
vlan = getVlanID(port)
- assert self.vlans[port]['iface'] == nic
+ if self.vlans[port]['iface'] != nic:
+ raise AssertionError("Unexpected iface: %s expected: %s" %
+ (self.vlans[port]['iface'], nic))
port = nic
if port in self.bondings:
- assert bonding is None
+ if bonding is not None:
+ raise AssertionError("Unexpected bonding: %s expected: "
+ "None" % bonding)
bonding = port
lnics += self.bondings[bonding]['slaves']
elif port in self.nics:
diff --git a/vdsm/rpc/BindingXMLRPC.py b/vdsm/rpc/BindingXMLRPC.py
index 42aad1b..ffe233d 100644
--- a/vdsm/rpc/BindingXMLRPC.py
+++ b/vdsm/rpc/BindingXMLRPC.py
@@ -1088,7 +1088,8 @@
logLevel = logging.TRACE
displayArgs = args
if f.__name__ == 'vmDesktopLogin':
- assert 'password' not in kwargs
+ if 'password' in kwargs:
+ raise AssertionError("Unexpected kwarg: password")
if len(args) > 3:
displayArgs = args[:3] + ('****',) + args[4:]
diff --git a/vdsm/rpc/process-schema.py b/vdsm/rpc/process-schema.py
index ee88d93..51ec55c 100755
--- a/vdsm/rpc/process-schema.py
+++ b/vdsm/rpc/process-schema.py
@@ -103,7 +103,9 @@
'xxx': []})
# Pop a blank line
- assert('' == lines.pop(0))
+ line = lines.pop(0)
+ if line != '':
+ raise AssertionError("Expected empty line: %s" % line)
# Grab the entity description. It might span multiple lines.
symbol['desc'] = lines.pop(0)
@@ -111,7 +113,9 @@
symbol['desc'] += lines.pop(0)
# Pop a blank line
- assert ('' == lines.pop(0))
+ line = lines.pop(0)
+ if line != '':
+ raise AssertionError("Expected empty line: %s" % line)
# Populate the rest of the human-readable data.
# First try to read the parameters/members information. We are finished
diff --git a/vdsm/storage/misc.py b/vdsm/storage/misc.py
index d0869ee..4e7d9ba 100644
--- a/vdsm/storage/misc.py
+++ b/vdsm/storage/misc.py
@@ -697,7 +697,9 @@
def exit(self):
with self._cond:
- assert self._busy, "Attempt to exit a barrier without entering"
+ if not self._busy:
+ raise AssertionError("Attempt to exit a barrier without "
+ "entering")
self._busy = False
self._cond.notifyAll()
--
To view, visit http://gerrit.ovirt.org/29307
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I8c60c65b283f4343448bb9eaf6ccf2526b188db0
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Nir Soffer <nsoffer(a)redhat.com>
9 years, 3 months
Change in vdsm[master]: mailbox: Log traceback after fatal failures in mailbox monitors
by Nir Soffer
Nir Soffer has uploaded a new change for review.
Change subject: mailbox: Log traceback after fatal failures in mailbox monitors
......................................................................
mailbox: Log traceback after fatal failures in mailbox monitors
Mailbox monitor threads were not logging fatal errors properly. Now these
threads use our standard @traceback decorator to ensure that failures
are logged properly.
The message when stopping the spm mailbox thread normally was moved out
of the try finally block, to ensure that it shows only for normal
shutdown, and not for unexpected death of the thread, where we will have
a clear traceback.
Change-Id: I68957ca745018349cae488acfe252e902c2af3ae
Signed-off-by: Nir Soffer <nsoffer(a)redhat.com>
---
M vdsm/storage/storage_mailbox.py
1 file changed, 7 insertions(+), 2 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/53/29853/1
diff --git a/vdsm/storage/storage_mailbox.py b/vdsm/storage/storage_mailbox.py
index c31018a..59d0060 100644
--- a/vdsm/storage/storage_mailbox.py
+++ b/vdsm/storage/storage_mailbox.py
@@ -36,6 +36,7 @@
from threadPool import ThreadPool
from storage_exception import InvalidParameterException
from vdsm import constants
+from vdsm import utils
__author__ = "ayalb"
__date__ = "$Mar 9, 2009 5:25:07 PM$"
@@ -425,6 +426,8 @@
MESSAGES_PER_MAILBOX,
repr(self._outgoingMail[start:end])))
+ @utils.traceback(on=log.name,
+ msg="Unhandled exception in HSM_MailMonitor thread")
def run(self):
try:
failures = 0
@@ -778,6 +781,8 @@
finally:
self._outLock.release()
+ @utils.traceback(on=log.name,
+ msg="Unhandled exception in SPM_MailMonitor thread")
def run(self, *args):
try:
while not self._stop:
@@ -788,8 +793,8 @@
self._inLock.release()
self.log.error("Error checking for mail", exc_info=True)
time.sleep(self._monitorInterval)
+ self.log.info("SPM_MailMonitor - Incoming mail monitoring thread "
+ "stopped")
finally:
self._stopped = True
self.tp.joinAll(waitForTasks=False)
- self.log.info("SPM_MailMonitor - Incoming mail monitoring thread "
- "stopped")
--
To view, visit http://gerrit.ovirt.org/29853
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I68957ca745018349cae488acfe252e902c2af3ae
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Nir Soffer <nsoffer(a)redhat.com>
9 years, 3 months
Change in vdsm[master]: gluster: geo-replication pause and resume session
by dnarayan@redhat.com
Darshan N has uploaded a new change for review.
Change subject: gluster: geo-replication pause and resume session
......................................................................
gluster: geo-replication pause and resume session
This patch adds two new verbs to pause and resume
geo-replication session between master and slave
volume. It returns a boolean output.
Change-Id: I024bcee148bab1e713e1bc5c73d288613c466656
Signed-off-by: ndarshan <dnarayan(a)redhat.com>
---
M client/vdsClientGluster.py
M vdsm/gluster/api.py
M vdsm/gluster/cli.py
M vdsm/gluster/exception.py
M vdsm/gluster/vdsmapi-gluster-schema.json
5 files changed, 157 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/69/31069/1
diff --git a/client/vdsClientGluster.py b/client/vdsClientGluster.py
index 421c853..bd7e107 100644
--- a/client/vdsClientGluster.py
+++ b/client/vdsClientGluster.py
@@ -430,6 +430,34 @@
pp.pprint(status)
return status['status']['code'], status['status']['message']
+ def do_glusterVolumeGeoRepPause(self, args):
+ params = self._eqSplit(args)
+ volumeName = params.get('volumeName', '')
+ remoteHost = params.get('remoteHost', '')
+ remoteVolumeName = params.get('remoteVolumeName', '')
+ force = (params.get('force', 'no').upper() == 'YES')
+
+ status = self.s.glusterVolumeGeoRepPause(volumeName,
+ remoteHost,
+ remoteVolumeName,
+ force)
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeGeoRepResume(self, args):
+ params = self._eqSplit(args)
+ volumeName = params.get('volumeName', '')
+ remoteHost = params.get('remoteHost', '')
+ remoteVolumeName = params.get('remoteVolumeName', '')
+ force = (params.get('force', 'no').upper() == 'YES')
+
+ status = self.s.glusterVolumeGeoRepResume(volumeName,
+ remoteHost,
+ remoteVolumeName,
+ force)
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
def getGlusterCmdDict(serv):
return \
@@ -731,4 +759,26 @@
('volumeName=<volume name>',
'Returns total, free and used space(bytes) of gluster volume'
)),
+ 'glusterVolumeGeoRepPause': (
+ serv.do_glusterVolumeGeoRepPause,
+ ('volumeName=<master_volume_name> '
+ 'remoteHost=<slave_host_name> '
+ 'remoteVolumeName=<slave_volume_name> '
+ '[force={yes|no}]\n\t'
+ '<master_volume_name>existing volume name in the master node\n\t'
+ '<slave_host_name>is remote slave host name or ip\n\t'
+ '<slave_volume_name>existing volume name in the slave node',
+ 'Pause the geo-replication session'
+ )),
+ 'glusterVolumeGeoRepResume': (
+ serv.do_glusterVolumeGeoRepResume,
+ ('volumeName=<master_volume_name> '
+ 'remoteHost=<slave_host_name> '
+ 'remoteVolumeName=<slave_volume_name> '
+ '[force={yes|no}]\n\t'
+ '<master_volume_name>existing volume name in the master node\n\t'
+ '<slave_host_name>is remote slave host name or ip\n\t'
+ '<slave_volume_name>existing volume name in the slave node',
+ 'Resume the geo-replication session'
+ )),
}
diff --git a/vdsm/gluster/api.py b/vdsm/gluster/api.py
index 0e6c850..abe8ba0 100644
--- a/vdsm/gluster/api.py
+++ b/vdsm/gluster/api.py
@@ -322,6 +322,22 @@
data = self.svdsmProxy.glusterVolumeStatvfs(volumeName)
return self._computeVolumeStats(data)
+ @exportAsVerb
+ def volumeGeoRepPause(self, volumeName, remoteHost, remoteVolumeName,
+ force=False, options=None):
+ self.svdsmProxy.glusterVolumeGeoRepPause(volumeName,
+ remoteHost,
+ remoteVolumeName,
+ force)
+
+ @exportAsVerb
+ def volumeGeoRepResume(self, volumeName, remoteHost, remoteVolumeName,
+ force=False, options=None):
+ self.svdsmProxy.glusterVolumeGeoRepResume(volumeName,
+ remoteHost,
+ remoteVolumeName,
+ force)
+
def getGlusterMethods(gluster):
l = []
diff --git a/vdsm/gluster/cli.py b/vdsm/gluster/cli.py
index 2e1c9a9..7993b18 100644
--- a/vdsm/gluster/cli.py
+++ b/vdsm/gluster/cli.py
@@ -1054,3 +1054,33 @@
return _parseVolumeTasks(xmltree)
except _etreeExceptions:
raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
+
+
+@makePublic
+def volumeGeoRepPause(volumeName, remoteHost, remoteVolumeName, force=False):
+ command = _getGlusterVolCmd() + ["geo-replication", volumeName,
+ "%s::%s" % (remoteHost, remoteVolumeName),
+ "pause"]
+ if force:
+ command.append('force')
+ try:
+ _execGlusterXml(command)
+ return True
+ except ge.GlusterCmdFailedException as e:
+ raise ge.GlusterVolumeGeoRepPauseFailedException(rc=e.rc,
+ err=e.err)
+
+
+@makePublic
+def volumeGeoRepResume(volumeName, remoteHost, remoteVolumeName, force=False):
+ command = _getGlusterVolCmd() + ["geo-replication", volumeName,
+ "%s::%s" % (remoteHost, remoteVolumeName),
+ "resume"]
+ if force:
+ command.append('force')
+ try:
+ _execGlusterXml(command)
+ return True
+ except ge.GlusterCmdFailedException as e:
+ raise ge.GlusterVolumeGeoRepResumeFailedException(rc=e.rc,
+ err=e.err)
diff --git a/vdsm/gluster/exception.py b/vdsm/gluster/exception.py
index 0205cb1..b7914c0 100644
--- a/vdsm/gluster/exception.py
+++ b/vdsm/gluster/exception.py
@@ -506,3 +506,19 @@
class GlfsFiniException(GlusterLibgfapiException):
code = 4573
message = "glfs fini failed"
+
+
+# geo-replication
+class GlusterGeoRepException(GlusterException):
+ code = 4560
+ message = "Gluster Geo-Replication Exception"
+
+
+class GlusterVolumeGeoRepPauseFailedException(GlusterGeoRepException):
+ code = 4575
+ message = "Volume geo-replication pause failed"
+
+
+class GlusterVolumeGeoRepResumeFailedException(GlusterGeoRepException):
+ code = 4576
+ message = "Volume geo-replication resume failed"
diff --git a/vdsm/gluster/vdsmapi-gluster-schema.json b/vdsm/gluster/vdsmapi-gluster-schema.json
index 4ddd182..47003e5 100644
--- a/vdsm/gluster/vdsmapi-gluster-schema.json
+++ b/vdsm/gluster/vdsmapi-gluster-schema.json
@@ -1238,3 +1238,48 @@
{'command': {'class': 'GlusterVolume', 'name': 'statsInfoGet'},
'data': {'volumeName': 'str'},
'returns': 'GlusterVolumeStatsInfo'}
+
+
+##
+# @GlusterVolume.geoRepSessionPause:
+#
+# Pauses the Geo Replication session
+#
+# @volName: Is an existing volume name in the master node
+#
+# @remoteHost: Is remote slave host name or ip
+#
+# @remoteVolumeName: Is an available existing volume name in the slave node
+#
+# @force: For pausing a geo-replication session forcefully
+#
+# Returns:
+# True if the session is successfully paused
+#
+# Since: 4.16.0
+##
+{'command': {'class': 'GlusterVolume', 'name': 'geoRepSessionPause'},
+ 'data': {'volName': 'str', 'remoteHost': 'str', 'remoteVolumeName': 'str', 'force': 'bool'},
+ 'returns': 'bool'}
+
+##
+# @GlusterVolume.geoRepSessionResume:
+#
+# Resumes the Geo Replication session
+#
+# @volName: Is an existing volume name in the master node
+#
+# @remoteHost: Is remote slave host name or ip
+#
+# @remoteVolumeName: Is an available existing volume name in the slave node
+#
+# @force: For resuming a geo-replication session forcefully
+#
+# Returns:
+# True if session is successfully resumed
+#
+# Since: 4.16.0
+##
+{'command': {'class': 'GlusterVolume', 'name': 'geoRepSessionResume'},
+ 'data': {'volName': 'str', 'remoteHost': 'str', 'remoteVolumeName': 'str', 'force': 'bool'},
+ 'returns': 'bool'}
--
To view, visit http://gerrit.ovirt.org/31069
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I024bcee148bab1e713e1bc5c73d288613c466656
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Darshan N <dnarayan(a)redhat.com>
9 years, 3 months
Change in vdsm[master]: vm: migration: move wait countdown in a function
by fromani@redhat.com
Francesco Romani has uploaded a new change for review.
Change subject: vm: migration: move wait countdown in a function
......................................................................
vm: migration: move wait countdown in a function
The Downtime thread is a migration helper thread
which is in charge of periodically updating the
migration downtime.
This thread blindly does a configurable number
of steps, and after each step it updates the
downtime according to a given law.
Between each step, it waits for a number of
seconds according to a different law.
Both the cases in which migration ends while
the downtime thread is running and after it
has ended are implicitly and gracefully
handled by the existing code, since it
is safe to set the downtime asynchronously.
The downtime update law was already extracted;
this patch also extracts the wait law,
in order to improve readability and testability.
Change-Id: Id808539a654e1556519ed54f6f4b67364eacf48a
Signed-off-by: Francesco Romani <fromani(a)redhat.com>
---
M vdsm/virt/migration.py
1 file changed, 15 insertions(+), 7 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/06/32706/1
diff --git a/vdsm/virt/migration.py b/vdsm/virt/migration.py
index 836fe3c..97333ca 100644
--- a/vdsm/virt/migration.py
+++ b/vdsm/virt/migration.py
@@ -362,8 +362,17 @@
yield offset + int(base ** i)
+def linear_wait(mem_size, delay, steps):
+ wait = (delay * max(mem_size, 2048) + 1023) / 1024
+
+ for _ in range(steps):
+ yield wait / steps
+
+
class DowntimeThread(threading.Thread):
DOWNTIME_STEPS = config.getint('vars', 'migration_downtime_steps')
+
+ DELAY_PER_GIB = config.getint('vars', 'migration_downtime_delay')
def __init__(self, vm, downtime):
super(DowntimeThread, self).__init__()
@@ -373,19 +382,18 @@
self._stop = threading.Event()
- delay_per_gib = config.getint('vars', 'migration_downtime_delay')
- mem_size = int(self._vm.conf['memSize'])
- self._wait = (delay_per_gib * max(mem_size, 2048) + 1023) / 1024
-
self.daemon = True
self.start()
def run(self):
self._vm.log.debug('migration downtime thread started')
- for downtime in exponential_downtime(self._downtime,
- self.DOWNTIME_STEPS):
- self._stop.wait(self._wait / self.DOWNTIME_STEPS)
+ for downtime, wait in zip(exponential_downtime(self._downtime,
+ self.DOWNTIME_STEPS),
+ linear_wait(int(self._vm.conf['memSize']),
+ self.DELAY_PER_GIB,
+ self.DOWNTIME_STEPS)):
+ self._stop.wait(wait)
if self._stop.isSet():
break
--
To view, visit http://gerrit.ovirt.org/32706
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Id808539a654e1556519ed54f6f4b67364eacf48a
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Francesco Romani <fromani(a)redhat.com>
9 years, 3 months
Change in vdsm[master]: vdsm-gluster: Added gluster volume geo-replication start verb
by tjeyasin@redhat.com
Hello Ayal Baron, Bala.FA, Saggi Mizrahi, Dan Kenigsberg,
I'd like you to do a code review. Please visit
http://gerrit.ovirt.org/17766
to review the following change.
Change subject: vdsm-gluster: Added gluster volume geo-replication start verb
......................................................................
vdsm-gluster: Added gluster volume geo-replication start verb
Start the geo-replication session between the hosts.
Start distributed geo-replication on all the nodes that are a part
of the master-volume. Even if any node, that is a part of the
master-volume is down, the command will still be successful.
Change-Id: I3cf03c748cf9fe28efe7d407727cd52da20701c5
Signed-off-by: Timothy Asir <tjeyasin(a)redhat.com>
---
M client/vdsClientGluster.py
M vdsm/gluster/api.py
M vdsm/gluster/cli.py
M vdsm/gluster/exception.py
4 files changed, 100 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/66/17766/1
diff --git a/client/vdsClientGluster.py b/client/vdsClientGluster.py
index 90af83e..feb6387 100644
--- a/client/vdsClientGluster.py
+++ b/client/vdsClientGluster.py
@@ -424,6 +424,34 @@
pp.pprint(status)
return status['status']['code'], status['status']['message']
+ def do_glusterVolumeGeoRepStart(self, args):
+ params = self._eqSplit(args)
+ masterVolName = params.get('masterVolName', '')
+ slaveHost = params.get('slaveHost', '')
+ slaveVolName = params.get('slaveVolName', '')
+ if not(masterVolName and slaveHost and slaveVolName):
+ raise ValueError
+
+ status = self.s.glusterVolumeGeoRepStart(masterVolName,
+ slaveHost,
+ slaveVolName)
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeGeoRepStop(self, args):
+ params = self._eqSplit(args)
+ masterVolName = params.get('masterVolName', '')
+ slaveHost = params.get('slaveHost', '')
+ slaveVolName = params.get('slaveVolName', '')
+ if not(masterVolName and slaveHost and slaveVolName):
+ raise ValueError
+
+ status = self.s.glusterVolumeGeoRepStop(masterVolName,
+ slaveHost,
+ slaveVolName)
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
def getGlusterCmdDict(serv):
return \
@@ -705,4 +733,26 @@
'not set'
'(swift, glusterd, smb, memcached)'
)),
+ 'glusterVolumeGeoRepStart': (
+ serv.do_glusterVolumeGeoRepStart,
+ ('masterVolName=<master_volume_name> slaveHost=<slave_host> '
+ 'slaveVolName=<slave_volume_name>\n\t'
+ '<master_volume_name> is an existing volume name in the '
+ 'master node\n\t'
+ '<slave_host> is slave host name\n\t'
+ '<slave_volume_name> is an existing volume name in the '
+ 'slave node',
+ 'start volume geo-replication'
+ )),
+ 'glusterVolumeGeoRepStop': (
+ serv.do_glusterVolumeGeoRepStop,
+ ('masterVolName=<master_volume_name> slaveHost=<slave_host> '
+ 'slaveVolName=<slave_volume_name>\n\t'
+ '<master_volume_name> is an existing volume name in the '
+ 'master node\n\t'
+ '<slave_host> is slave host name\n\t'
+ '<slave_volume_name> is an existing volume name in the '
+ 'slave node',
+ 'stop volume geo-replication'
+ )),
}
diff --git a/vdsm/gluster/api.py b/vdsm/gluster/api.py
index 4bd8308..ed9f5ae 100644
--- a/vdsm/gluster/api.py
+++ b/vdsm/gluster/api.py
@@ -287,6 +287,20 @@
status = self.svdsmProxy.glusterServicesGet(serviceNames)
return {'services': status}
+ @exportAsVerb
+ def volumeGeoRepStart(self, masterVolName, slaveHost, slaveVolName,
+ options=None):
+ self.svdsmProxy.glusterVolumeGeoRepStart(masterVolName,
+ slaveHost,
+ slaveVolName)
+
+ @exportAsVerb
+ def volumeGeoRepStop(self, masterVolName, slaveHost, slaveVolName,
+ options=None):
+ self.svdsmProxy.glusterVolumeGeoRepStop(masterVolName,
+ slaveHost,
+ slaveVolName)
+
def getGlusterMethods(gluster):
l = []
diff --git a/vdsm/gluster/cli.py b/vdsm/gluster/cli.py
index bac6d1c..e4d6615 100644
--- a/vdsm/gluster/cli.py
+++ b/vdsm/gluster/cli.py
@@ -897,3 +897,29 @@
return _parseVolumeProfileInfo(xmltree, nfs)
except _etreeExceptions:
raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
+
+
+@makePublic
+def volumeGeoRepStart(masterVolName, slaveHost, slaveVolName):
+ command = _getGlusterVolCmd() + ["geo-replication", masterVolName,
+ "%s::%s" % (slaveHost, slaveVolName),
+ "start"]
+ try:
+ _execGlusterXml(command)
+ return True
+ except ge.GlusterCmdFailedException as e:
+ raise ge.GlusterVolumeGeoRepStartFailedException(rc=e.rc,
+ err=e.err)
+
+
+@makePublic
+def volumeGeoRepStop(masterVolName, slaveHost, slaveVolName):
+ command = _getGlusterVolCmd() + ["geo-replication", masterVolName,
+ "%s::%s" % (slaveHost, slaveVolName),
+ "stop"]
+ try:
+ _execGlusterXml(command)
+ return True
+ except ge.GlusterCmdFailedException as e:
+ raise ge.GlusterVolumeGeoRepStopFailedException(rc=e.rc,
+ err=e.err)
diff --git a/vdsm/gluster/exception.py b/vdsm/gluster/exception.py
index c569a9e..259df32 100644
--- a/vdsm/gluster/exception.py
+++ b/vdsm/gluster/exception.py
@@ -484,3 +484,13 @@
prefix = "%s: " % (action)
self.message = prefix + "Service action is not supported"
self.err = [self.message]
+
+
+class GlusterVolumeGeoRepStartFailedException(GlusterVolumeException):
+ code = 4164
+ message = "Volume geo-replication start failed"
+
+
+class GlusterVolumeGeoRepStopFailedException(GlusterVolumeException):
+ code = 4165
+ message = "Volume geo-replication stop failed"
--
To view, visit http://gerrit.ovirt.org/17766
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I3cf03c748cf9fe28efe7d407727cd52da20701c5
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Timothy Asir <tjeyasin(a)redhat.com>
Gerrit-Reviewer: Ayal Baron <abaron(a)redhat.com>
Gerrit-Reviewer: Bala.FA <barumuga(a)redhat.com>
Gerrit-Reviewer: Dan Kenigsberg <danken(a)redhat.com>
Gerrit-Reviewer: Saggi Mizrahi <smizrahi(a)redhat.com>
9 years, 3 months
Change in vdsm[master]: image: use qemu-img convert to copy internal volumes
by Federico Simoncelli
Federico Simoncelli has uploaded a new change for review.
Change subject: image: use qemu-img convert to copy internal volumes
......................................................................
image: use qemu-img convert to copy internal volumes
According to the qemu-img manual:
You can use the backing_file option to force the output image to be
created as a copy on write image of the specified base image; the
backing_file should have the same content as the input's base image,
however the path, image format, etc may differ.
The benefits to use qemu-img convert instead of dd to copy the internal
volumes are:
- we unify operations under just one tool that is specific for image
manipulations
- copying images from block domains we won't copy the entire chunk
(1Gb) but only the amount of data really in use
The downside is the additional call to getParentVolume (that seems
trascurable compared to the amount of data that we're going to copy).
Change-Id: I1c740d88d52ca678d6c02d0ea500d2459c26560c
Signed-off-by: Federico Simoncelli <fsimonce(a)redhat.com>
---
M vdsm/storage/image.py
1 file changed, 18 insertions(+), 14 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/55/33355/1
diff --git a/vdsm/storage/image.py b/vdsm/storage/image.py
index 9855a26..306722b 100644
--- a/vdsm/storage/image.py
+++ b/vdsm/storage/image.py
@@ -430,21 +430,25 @@
try:
dstVol = destDom.produceVolume(imgUUID=imgUUID,
volUUID=srcVol.volUUID)
- srcFmt = srcVol.getFormat()
- if srcFmt == volume.RAW_FORMAT:
- srcFmtStr = volume.fmt2str(srcFmt)
- dstFmtStr = volume.fmt2str(dstVol.getFormat())
- self.log.debug("start qemu convert")
- qemuimg.convert(srcVol.getVolumePath(),
- dstVol.getVolumePath(),
- vars.task.aborting,
- srcFmtStr, dstFmtStr)
+ srcFmtStr = volume.fmt2str(srcVol.getFormat())
+ dstFmtStr = volume.fmt2str(dstVol.getFormat())
+
+ pntVol = dstVol.getParentVolume()
+
+ if pntVol is not None:
+ pntBakPath = volume.getBackingVolumePath(
+ imgUUID, pntVol.volUUID)
+ pntFmtStr = volume.fmt2str(pntVol.getFormat())
else:
- srcSize = srcVol.getVolumeSize(bs=1)
- misc.ddWatchCopy(srcVol.getVolumePath(),
- dstVol.getVolumePath(),
- vars.task.aborting,
- size=srcSize)
+ pntBakPath = None
+ pntFmtStr = None
+
+ self.log.debug("start qemu convert")
+ qemuimg.convert(srcVol.getVolumePath(),
+ dstVol.getVolumePath(),
+ vars.task.aborting,
+ srcFmtStr, dstFmtStr,
+ pntBakPath, pntFmtStr)
except ActionStopped:
raise
except se.StorageException:
--
To view, visit http://gerrit.ovirt.org/33355
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I1c740d88d52ca678d6c02d0ea500d2459c26560c
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Federico Simoncelli <fsimonce(a)redhat.com>
9 years, 3 months
Change in vdsm[master]: networkTests: Extend a test to also consider DHCPv6
by osvoboda@redhat.com
Ondřej Svoboda has uploaded a new change for review.
Change subject: networkTests: Extend a test to also consider DHCPv6
......................................................................
networkTests: Extend a test to also consider DHCPv6
Change-Id: Ic5d821edd54681a7a8c1013a90af61ae835baa39
Signed-off-by: Ondřej Svoboda <osvoboda(a)redhat.com>
---
M tests/functional/dummy.py
M tests/functional/firewall.py
M tests/functional/networkTests.py
3 files changed, 32 insertions(+), 16 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/32/30532/1
diff --git a/tests/functional/dummy.py b/tests/functional/dummy.py
index aa56c71..b94c77d 100644
--- a/tests/functional/dummy.py
+++ b/tests/functional/dummy.py
@@ -54,9 +54,9 @@
(dummyName, e))
-def setIP(dummyName, ipaddr, netmask):
+def setIP(dummyName, ipaddr, netmask, family=4):
try:
- addrAdd(dummyName, ipaddr, netmask)
+ addrAdd(dummyName, ipaddr, netmask, family)
except IPRoute2Error:
raise SkipTest('Failed to set device ip')
diff --git a/tests/functional/firewall.py b/tests/functional/firewall.py
index ee7bb9f..9940a75 100644
--- a/tests/functional/firewall.py
+++ b/tests/functional/firewall.py
@@ -43,7 +43,10 @@
if _serviceRunning('iptables'):
_execCmdChecker([_IPTABLES_BINARY.cmd, '-I', 'INPUT', '-i',
veth, '-p', 'udp', '--sport', '68', '--dport',
- '67', '-j', 'ACCEPT'])
+ '67', '-j', 'ACCEPT']) # DHCPv4
+ _execCmdChecker([_IPTABLES_BINARY.cmd, '-I', 'INPUT', '-i',
+ veth, '-p', 'udp', '--sport', '546', '--dport',
+ '547', '-j', 'ACCEPT']) # DHCPv6
elif _serviceRunning('firewalld'):
_execCmdChecker([_FIREWALLD_BINARY.cmd, '--zone=trusted',
'--change-interface=' + veth])
@@ -68,7 +71,10 @@
if _serviceRunning('iptables'):
_execCmdChecker([_IPTABLES_BINARY.cmd, '-D', 'INPUT', '-i',
veth, '-p', 'udp', '--sport', '68', '--dport',
- '67', '-j', 'ACCEPT'])
+ '67', '-j', 'ACCEPT']) # DHCPv4
+ _execCmdChecker([_IPTABLES_BINARY.cmd, '-D', 'INPUT', '-i',
+ veth, '-p', 'udp', '--sport', '546', '--dport',
+ '547', '-j', 'ACCEPT']) # DHCPv6
elif _serviceRunning('firewalld'):
_execCmdChecker([_FIREWALLD_BINARY.cmd, '--zone=trusted',
'--remove-interface=' + veth])
diff --git a/tests/functional/networkTests.py b/tests/functional/networkTests.py
index f87f49b..0033c19 100644
--- a/tests/functional/networkTests.py
+++ b/tests/functional/networkTests.py
@@ -64,8 +64,11 @@
DHCP_RANGE_TO = '240.0.0.100'
CUSTOM_PROPS = {'linux': 'rules', 'vdsm': 'as well'}
-IPv6_ADDRESS = 'fdb3:84e5:4ff4:55e3::1/64'
+IPv6_ADDRESS = 'fdb3:84e5:4ff4:55e3::1'
+IPv6_CIDR = '64'
+IPv6_ADDRESS_AND_CIDR = IPv6_ADDRESS + '/' + IPv6_CIDR
IPv6_GATEWAY = 'fdb3:84e5:4ff4:55e3::ff'
+DHCPv6_ADDRESS = 'fdb3:84e5:4ff4:55e3::2'
dummyPool = set()
DUMMY_POOL_SIZE = 5
@@ -90,11 +93,16 @@
@contextmanager
-def dnsmasqDhcp(interface):
+def dnsmasqDhcp(interface, family=4):
"""Manages the life cycle of dnsmasq as a DHCP server."""
dhcpServer = dhcp.Dnsmasq()
+ if family == 4:
+ rangeFrom, rangeTo = DHCP_RANGE_FROM, DHCP_RANGE_TO
+ else:
+ rangeFrom, rangeTo = DHCPv6_ADDRESS, DHCPv6_ADDRESS
+
try:
- dhcpServer.start(interface, DHCP_RANGE_FROM, DHCP_RANGE_TO)
+ dhcpServer.start(interface, rangeFrom, rangeTo)
except dhcp.DhcpError as e:
raise SkipTest(e)
@@ -1769,11 +1777,11 @@
nic, = nics
networks = {
NETWORK_NAME + '1':
- {'nic': nic, 'bootproto': 'none', 'ipv6addr': IPv6_ADDRESS,
- 'ipv6gateway': IPv6_GATEWAY},
+ {'nic': nic, 'bootproto': 'none', 'ipv6gateway': IPv6_GATEWAY,
+ 'ipv6addr': IPv6_ADDRESS_AND_CIDR},
NETWORK_NAME + '2':
- {'nic': nic, 'bootproto': 'none', 'ipv6addr': IPv6_ADDRESS,
- 'ipv6gateway': IPv6_GATEWAY, 'ipaddr': IP_ADDRESS,
+ {'nic': nic, 'bootproto': 'none', 'ipv6gateway': IPv6_GATEWAY,
+ 'ipv6addr': IPv6_ADDRESS_AND_CIDR, 'ipaddr': IP_ADDRESS,
'gateway': IP_GATEWAY,
'netmask': prefix2netmask(int(IP_CIDR))}}
for network, netdict in networks.iteritems():
@@ -1783,7 +1791,7 @@
self.assertEqual(status, SUCCESS, msg)
self.assertNetworkExists(network)
self.assertIn(
- IPv6_ADDRESS,
+ IPv6_ADDRESS_AND_CIDR,
self.vdsm_net.netinfo.networks[network]['ipv6addrs'])
self.assertEqual(
IPv6_GATEWAY,
@@ -1836,14 +1844,15 @@
NOCHK)
@permutations([[True], [False]])
+ @permutations([[(4, IP_ADDRESS, IP_CIDR)], [(6, IPv6_ADDRESS, IPv6_CIDR)]])
@cleanupNet
@RequireVethMod
@ValidateRunningAsRoot
- def testSetupNetworksAddDelDhcp(self, bridged):
+ def testSetupNetworksAddDelDhcp(self, bridged, (family, addr, cidr)):
with vethIf() as (left, right):
- veth.setIP(left, IP_ADDRESS, IP_CIDR)
+ veth.setIP(left, addr, cidr)
veth.setLinkUp(left)
- with dnsmasqDhcp(left):
+ with dnsmasqDhcp(left, family):
network = {NETWORK_NAME: {'nic': right, 'bridged': bridged,
'bootproto': 'dhcp',
'blockingdhcp': True}}
@@ -1853,7 +1862,8 @@
self.assertNetworkExists(NETWORK_NAME)
net = self.vdsm_net.netinfo.networks[NETWORK_NAME]
- self.assertEqual(net['bootproto4'], 'dhcp')
+ if family == 4:
+ self.assertEqual(net['bootproto4'], 'dhcp')
if bridged:
self.assertEqual(net['cfg']['BOOTPROTO'], 'dhcp')
--
To view, visit http://gerrit.ovirt.org/30532
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ic5d821edd54681a7a8c1013a90af61ae835baa39
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Ondřej Svoboda <osvoboda(a)redhat.com>
9 years, 3 months