Change in vdsm[master]: [WIP] Added glusterVolumeTop verb
by tjeyasin@redhat.com
Hello Ayal Baron, Bala.FA, Saggi Mizrahi, Federico Simoncelli, Dan Kenigsberg,
I'd like you to do a code review. Please visit
http://gerrit.ovirt.org/7844
to review the following change.
Change subject: [WIP] Added glusterVolumeTop verb
......................................................................
[WIP] Added glusterVolumeTop verb
Added glusterVolumeTopOpen verb
Added glusterVolumeTopRead verb
Added glusterVolumeTopWrite verb
Added glusterVolumeTopOpenDir verb
Added glusterVolumeTopReadDir verb
Added glusterVolumeTopReadPerf verb
Added glusterVolumeTopWritePerf verb
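Each verb shells out to the gluster CLI "volume top" command. A minimal
sketch of the argv built for glusterVolumeTopOpen, assuming
_getGlusterVolCmd() yields ['gluster', 'volume'] and that the XML output
flag is appended by _execGlusterXml (volume and brick names invented):

    # Hypothetical: volumeTopOpen('music', brickName='srv1:/b1', count=10)
    command = ['gluster', 'volume', 'top', 'music', 'open',
               'brick', 'srv1:/b1', 'list-cnt', '10']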
Following is the output structure of glusterVolumeTopOpen
{'statusCode': CODE,
 'brickCount': BRICK-COUNT,
 'bricks': {BRICK-NAME: {'count': FILE-COUNT,
                         'currentOpenFds': CURRENT-OPEN-FDS-COUNT,
                         'maxOpen': MAX-OPEN,
                         'maxOpenTime': MAX-OPEN-TIME,
                         'files': [{FILE-NAME: FILE-OPEN-COUNT}, ...]
                         }, ...}}
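For illustration, a hypothetical populated glusterVolumeTopOpen result
(all values invented; the parser returns XML text nodes, so values are
strings):

    {'statusCode': '0',
     'brickCount': '2',
     'bricks': {'srv1:/b1': {'count': '2',
                             'currentOpenFds': '16',
                             'maxOpen': '128',
                             'maxOpenTime': '2012-08-28 12:00:00',
                             'files': [{'/file1.img': '10'},
                                       {'/file2.img': '6'}]},
                ...}}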
Following is the output structure of glusterVolumeTopRead
{'statusCode': CODE,
 'brickCount': BRICK-COUNT,
 'topOp': TOP-OP,
 'bricks': {BRICK-NAME: {'count': FILE-COUNT,
                         'files': [{FILE-NAME: FILE-READ-COUNT}, ...]}
            , ...}}
Following is the output structure of glusterVolumeTopWrite
{'statusCode': CODE,
 'brickCount': BRICK-COUNT,
 'topOp': TOP-OP,
 'bricks': {BRICK-NAME: {'count': FILE-COUNT,
                         'files': [{FILE-NAME: FILE-WRITE-COUNT}, ...]}
            , ...}}
Following is the output structure of glusterVolumeTopOpenDir
{'statusCode': CODE,
 'brickCount': BRICK-COUNT,
 'topOp': TOP-OP,
 'bricks': {BRICK-NAME: {'count': OPEN-DIR-COUNT,
                         'files': [{DIR-NAME: DIR-OPEN-COUNT}, ...]}
            , ...}}
Following is the output structure of glusterVolumeTopReadDir
{'statusCode': CODE,
 'brickCount': BRICK-COUNT,
 'topOp': TOP-OP,
 'bricks': {BRICK-NAME: {'count': READ-DIR-COUNT,
                         'files': [{DIR-NAME: DIR-READ-COUNT}, ...]}
            , ...}}
Following is the output structure of glusterVolumeTopReadPerf
{'statusCode': CODE,
 'brickCount': BRICK-COUNT,
 'topOp': TOP-OP,
 'bricks': {BRICK-NAME: {'fileCount': READ-COUNT,
                         'throughput': BRICK-WISE-READ-THROUGHPUT,
                         'timeTaken': TIME-TAKEN,
                         'files': [{FILE-NAME:
                                    {'throughput': FILE-READ-THROUGHPUT,
                                     'time': TIME}}, ...]}
            , ...}}
Following is the output structure of glusterVolumeTopWritePerf
{'statusCode': CODE,
 'brickCount': BRICK-COUNT,
 'topOp': TOP-OP,
 'bricks': {BRICK-NAME: {'fileCount': WRITE-COUNT,
                         'throughput': BRICK-WISE-WRITE-THROUGHPUT,
                         'timeTaken': TIME-TAKEN,
                         'files': [{FILE-NAME:
                                    {'throughput': FILE-WRITE-THROUGHPUT,
                                     'time': TIME}}, ...]}
            , ...}}
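And a hypothetical populated glusterVolumeTopWritePerf result (values
invented; throughput units are whatever the gluster CLI reports):

    {'statusCode': '0',
     'brickCount': '1',
     'topOp': 'write-perf',
     'bricks': {'srv1:/b1': {'fileCount': '5',
                             'throughput': '312.00',
                             'timeTaken': '0.50',
                             'files': [{'/file1.img':
                                        {'throughput': '310.12',
                                         'time': '0.25'}}]}}}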
Change-Id: I96486363a9acb7472014a67fcd2d5185d4f3c428
Signed-off-by: Timothy Asir <tjeyasin(a)redhat.com>
---
M vdsm/gluster/api.py
M vdsm/gluster/cli.py
M vdsm/gluster/exception.py
M vdsm_cli/vdsClientGluster.py
4 files changed, 372 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/44/7844/1
diff --git a/vdsm/gluster/api.py b/vdsm/gluster/api.py
index e52430b..3f493e0 100644
--- a/vdsm/gluster/api.py
+++ b/vdsm/gluster/api.py
@@ -241,6 +241,61 @@
status = self.svdsmProxy.glusterVolumeProfileInfo(volumeName)
return {'profileInfo': status}
+ @exportAsVerb
+ def volumeTopOpen(self, volumeName, brickName=None, count=None,
+ options=None):
+ status = self.svdsmProxy.glusterVolumeTopOpen(volumeName,
+ brickName, count)
+ return {'topOpen': status}
+
+ @exportAsVerb
+ def volumeTopRead(self, volumeName, brickName=None, count=None,
+ options=None):
+ status = self.svdsmProxy.glusterVolumeTopRead(volumeName,
+ brickName, count)
+ return {'topRead': status}
+
+ @exportAsVerb
+ def volumeTopWrite(self, volumeName, brickName=None, count=None,
+ options=None):
+ status = self.svdsmProxy.glusterVolumeTopWrite(volumeName,
+ brickName, count)
+ return {'topWrite': status}
+
+ @exportAsVerb
+ def volumeTopOpenDir(self, volumeName, brickName=None, count=None,
+ options=None):
+ status = self.svdsmProxy.glusterVolumeTopOpenDir(volumeName,
+ brickName, count)
+ return {'topOpenDir': status}
+
+ @exportAsVerb
+ def volumeTopReadDir(self, volumeName, brickName=None, count=None,
+ options=None):
+ status = self.svdsmProxy.glusterVolumeTopReadDir(volumeName,
+ brickName, count)
+ return {'topReadDir': status}
+
+ @exportAsVerb
+ def volumeTopReadPerf(self, volumeName, blockSize=None, count=None,
+ brickName=None, listCount=None, options=None):
+ status = self.svdsmProxy.glusterVolumeTopReadPerf(volumeName,
+ blockSize,
+ count,
+ brickName,
+ listCount)
+ return {'topReadPerf': status}
+
+ @exportAsVerb
+ def volumeTopWritePerf(self, volumeName, blockSize=None, count=None,
+ brickName=None, listCount=None, options=None):
+ status = self.svdsmProxy.glusterVolumeTopWritePerf(volumeName,
+ blockSize,
+ count,
+ brickName,
+ listCount)
+ return {'topWritePerf': status}
+
def getGlusterMethods(gluster):
l = []
diff --git a/vdsm/gluster/cli.py b/vdsm/gluster/cli.py
index b91a04f..ba4768c 100644
--- a/vdsm/gluster/cli.py
+++ b/vdsm/gluster/cli.py
@@ -334,6 +334,66 @@
return volumeInfoDict
+def _parseGlusterVolumeTopOpen(tree):
+ bricks = {}
+ for brick in tree.findall('volTop/brick'):
+ fileList = []
+ for file in brick.findall('file'):
+ fileList.append({file.find('filename').text:
+ file.find('count').text})
+ bricks[brick.find('name').text] = {
+ 'count': brick.find('members').text,
+ 'currentOpen': brick.find('currentOpen').text,
+ 'maxOpen': brick.find('maxOpen').text,
+ 'maxOpenTime': brick.find('maxOpenTime').text,
+ 'files': fileList}
+ status = {
+ 'topOp': tree.find('volTop/topOp').text,
+ 'brickCount': tree.find('volTop/brickCount').text,
+ 'statusCode': tree.find('opRet').text,
+ 'bricks': bricks}
+ return status
+
+
+def _parseGlusterVolumeTop(tree):
+ bricks = {}
+ for brick in tree.findall('volTop/brick'):
+ fileList = []
+ for fileTag in brick.findall('file'):
+ fileList.append({fileTag.find('filename').text:
+ fileTag.find('count').text})
+ bricks[brick.find('name').text] = {
+ 'count': brick.find('members').text,
+ 'files': fileList}
+ status = {
+ 'topOp': tree.find('volTop/topOp').text,
+ 'brickCount': tree.find('volTop/brickCount').text,
+ 'statusCode': tree.find('opRet').text,
+ 'bricks': bricks}
+ return status
+
+
+def _parseGlusterVolumeTopPerf(tree):
+ bricks = {}
+ for brick in tree.findall('volTop/brick'):
+ fileList = []
+ for fileTag in brick.findall('file'):
+ fileList.append({fileTag.find('filename').text:
+ {'count': fileTag.find('count').text,
+ 'time': fileTag.find('time').text}})
+ bricks[brick.find('name').text] = {
+ 'count': brick.find('members').text,
+ 'throughput': brick.find('throughput').text,
+ 'timeTaken': brick.find('timeTaken').text,
+ 'files': fileList}
+ status = {
+ 'topOp': tree.find('volTop/topOp').text,
+ 'brickCount': tree.find('volTop/brickCount').text,
+ 'statusCode': tree.find("opRet").text,
+ 'bricks': bricks}
+ return status
+
+
def _parseGlusterVolumeProfileInfo(tree):
bricks = {}
for brick in tree.findall('volProfile/brick'):
@@ -819,3 +879,132 @@
return _parseGlusterVolumeProfileInfo(xmltree)
except:
raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopOpen(volumeName, brickName=None, count=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "open"]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if count:
+ command += ["list-cnt", "%s" % count]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopOpenFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTopOpen(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopRead(volumeName, brickName=None, count=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "read"]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if count:
+ command += ["list-cnt", "%s" % count]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopReadFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTop(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopWrite(volumeName, brickName=None, count=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "write"]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if count:
+ command += ["list-cnt", "%s" % count]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopWriteFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTop(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopOpenDir(volumeName, brickName=None, count=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "opendir"]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if count:
+ command += ["list-cnt", "%s" % count]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopOpenDirFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTop(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopReadDir(volumeName, brickName=None, count=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "readdir"]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if count:
+ command += ["list-cnt", "%s" % count]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopReadDirFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTop(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopReadPerf(volumeName, blockSize=None, count=None,
+ brickName=None, listCount=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "read-perf"]
+ if blockSize:
+ command += ["bs", "%s" % blockSize]
+ if count:
+ command += ["count", "%s" % count]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if listCount:
+ command += ["list-cnt", "%s" % listCount]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopReadPerfFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTopPerf(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
+
+
+@exportToSuperVdsm
+def volumeTopWritePerf(volumeName, blockSize=None, count=None,
+ brickName=None, listCount=None):
+ command = _getGlusterVolCmd() + ["top", volumeName, "write-perf"]
+ if blockSize:
+ command += ["bs", "%s" % blockSize]
+ if count:
+ command += ["count", "%s" % count]
+ if brickName:
+ command += ["brick", "%s" % brickName]
+ if listCount:
+ command += ["list-cnt", "%s" % listCount]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeTopWritePerfFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseGlusterVolumeTopPerf(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
diff --git a/vdsm/gluster/exception.py b/vdsm/gluster/exception.py
index bc20dd0..b392ec8 100644
--- a/vdsm/gluster/exception.py
+++ b/vdsm/gluster/exception.py
@@ -343,6 +343,41 @@
message = "Volume profile info failed"
+class GlusterVolumeTopOpenFailedException(GlusterVolumeException):
+ code = 4161
+ message = "Volume top open failed"
+
+
+class GlusterVolumeTopReadFailedException(GlusterVolumeException):
+ code = 4162
+ message = "Volume top read failed"
+
+
+class GlusterVolumeTopWriteFailedException(GlusterVolumeException):
+ code = 4163
+ message = "Volume top write failed"
+
+
+class GlusterVolumeTopOpenDirFailedException(GlusterVolumeException):
+ code = 4164
+ message = "Volume top open dir failed"
+
+
+class GlusterVolumeTopReadDirFailedException(GlusterVolumeException):
+ code = 4165
+ message = "Volume top read dir failed"
+
+
+class GlusterVolumeTopReadPerfFailedException(GlusterVolumeException):
+ code = 4166
+ message = "Volume top read perf failed"
+
+
+class GlusterVolumeTopWritePerfFailedException(GlusterVolumeException):
+ code = 4167
+ message = "Volume top write perf failed"
+
+
# Host
class GlusterHostException(GlusterException):
code = 4400
diff --git a/vdsm_cli/vdsClientGluster.py b/vdsm_cli/vdsClientGluster.py
index 8422695..3663c63 100644
--- a/vdsm_cli/vdsClientGluster.py
+++ b/vdsm_cli/vdsClientGluster.py
@@ -221,6 +221,41 @@
pp.pprint(status)
return status['status']['code'], status['status']['message']
+ def do_glusterVolumeTopOpen(self, args):
+ status = self.s.glusterVolumeTopOpen(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeTopRead(self, args):
+ status = self.s.glusterVolumeTopRead(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeTopWrite(self, args):
+ status = self.s.glusterVolumeTopWrite(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeTopOpenDir(self, args):
+ status = self.s.glusterVolumeTopOpenDir(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeTopReadDir(self, args):
+ status = self.s.glusterVolumeTopReadDir(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeTopReadPerf(self, args):
+ status = self.s.glusterVolumeTopReadPerf(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterVolumeTopWritePerf(self, args):
+ status = self.s.glusterVolumeTopWritePerf(args[0])
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
def getGlusterCmdDict(serv):
return {
@@ -403,4 +438,62 @@
('<volume_name>\n\t<volume_name> is existing volume name',
'get gluster volume profile info'
)),
+ 'glusterVolumeTopOpen':
+ (serv.do_glusterVolumeTopOpen,
+ ('<volume_name> [brick=<existing_brick>] '
+ '[count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get volume top open fd count and maximum fd count of '
+ 'a given volume for all its bricks or a specified brick'
+ )),
+ 'glusterVolumeTopRead':
+ (serv.do_glusterVolumeTopRead,
+ ('<volume_name> [brick=<existing_brick>] '
+ '[count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get list of highest read calls on each brick or '
+ 'a specified brick of a volume'
+ )),
+ 'glusterVolumeTopWrite':
+ (serv.do_glusterVolumeTopWrite,
+ ('<volume_name> [brick=<existing_brick>] '
+ '[count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get list of highest write calls on each brick or '
+ 'a specified brick of a volume'
+ )),
+ 'glusterVolumeTopOpenDir':
+ (serv.do_glusterVolumeTopOpenDir,
+ ('<volume_name> [brick=<existing_brick>] '
+ '[count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get list of highest open calls on directories of each brick '
+ 'or a specified brick of a volume'
+ )),
+ 'glusterVolumeTopReadDir':
+ (serv.do_glusterVolumeTopReadDir,
+ ('<volume_name> [brick=<existing_brick>] '
+ '[count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get list of highest read calls on directories of each brick '
+ 'or a specified brick of a volume'
+ )),
+ 'glusterVolumeTopReadPerf':
+ (serv.do_glusterVolumeTopReadPerf,
+ ('<volume_name> [block_size=<block_size>] '
+ '[count=<count>] [brick=<existing_brick>] '
+ '[list_count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get list of read throughput of files on bricks. '
+ 'if the block size and the count are not specified, '
+ 'it will give the output based on historical data'
+ )),
+ 'glusterVolumeTopWritePerf':
+ (serv.do_glusterVolumeTopWritePerf,
+ ('<volume_name> [block_size=<block_size>] '
+ '[count=<count>] [brick=<existing_brick>] '
+ '[list_count=<list_count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ 'get list of write throughput of files on bricks'
+ )),
}
--
To view, visit http://gerrit.ovirt.org/7844
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I96486363a9acb7472014a67fcd2d5185d4f3c428
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Timothy Asir <tjeyasin(a)redhat.com>
Gerrit-Reviewer: Ayal Baron <abaron(a)redhat.com>
Gerrit-Reviewer: Bala.FA <barumuga(a)redhat.com>
Gerrit-Reviewer: Dan Kenigsberg <danken(a)redhat.com>
Gerrit-Reviewer: Federico Simoncelli <fsimonce(a)redhat.com>
Gerrit-Reviewer: Saggi Mizrahi <smizrahi(a)redhat.com>
Change in vdsm[master]: gluster: Setup and verify ssl connection between nodes.
by tjeyasin@redhat.com
Hello Ayal Baron, Bala.FA, Saggi Mizrahi, Dan Kenigsberg,
I'd like you to do a code review. Please visit
http://gerrit.ovirt.org/18355
to review the following change.
Change subject: gluster: Setup and verify ssl connection between nodes.
......................................................................
gluster: Setup and verify ssl connection between nodes.
This will be used in geo-replication session creation,
because password-less ssh access is required between at
least one node of the master volume and one node of the
slave volume before a geo-replication session can be
created.
The following new verbs are added:
*glusterValidateSshConnection
*glusterSetupSshConnection
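A minimal sketch of the fingerprint comparison the new host-key policy
performs, assuming the expected fingerprint is the MD5 digest of the raw
public key rendered as colon-separated hex pairs (the format paramiko's
key.get_fingerprint() produces):

    import hashlib
    import re

    def fingerprint_matches(expected, key_bytes):
        # MD5 of the public key blob, rendered like 'ab:01:...:ef'
        digest = hashlib.md5(key_bytes).hexdigest()
        fingerprint = ':'.join(re.findall('..', digest))
        return fingerprint.upper() == expected.upper()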
Change-Id: Ia6f040b1343998de4f8e28419c63e380240368db
Signed-off-by: Bala.FA <barumuga(a)redhat.com>
Signed-off-by: Timothy Asir <tjeyasin(a)redhat.com>
---
M client/vdsClientGluster.py
M vdsm.spec.in
M vdsm/gluster/api.py
M vdsm/gluster/exception.py
4 files changed, 191 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/55/18355/1
diff --git a/client/vdsClientGluster.py b/client/vdsClientGluster.py
index 90af83e..0ae7ec7 100644
--- a/client/vdsClientGluster.py
+++ b/client/vdsClientGluster.py
@@ -424,6 +424,30 @@
pp.pprint(status)
return status['status']['code'], status['status']['message']
+ def do_glusterValidateSshConnection(self, args):
+ params = self._eqSplit(args)
+ host = params.get('host', '')
+ fingerprint = params.get('fingerprint', '')
+ username = params.get('username', '')
+
+ status = self.s.glusterValidateSshConnection(host,
+ fingerprint,
+ username)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterSetupSshConnection(self, args):
+ params = self._eqSplit(args)
+ host = params.get('host', '')
+ fingerprint = params.get('fingerprint', '')
+ username = params.get('username', '')
+ password = params.get('password', '')
+
+ status = self.s.glusterSetupSshConnection(host,
+ fingerprint,
+ username,
+ password)
+ return status['status']['code'], status['status']['message']
+
def getGlusterCmdDict(serv):
return \
@@ -705,4 +729,15 @@
'not set'
'(swift, glusterd, smb, memcached)'
)),
+ 'glusterValidateSshConnection': (
+ serv.do_glusterValidateSshConnection,
+ ('host=<host> fingerprint=<fingerprint> username=<username>',
+ 'validate passwordless ssh connection'
+ )),
+ 'glusterSetupSshConnection': (
+ serv.do_glusterSetupSshConnection,
+ ('host=<host> fingerprint=<fingerprint> username=<username> '
+ 'password=<password>',
+ 'setup passwordless ssh connection'
+ )),
}
diff --git a/vdsm.spec.in b/vdsm.spec.in
index e2307e0..81d8f9f 100644
--- a/vdsm.spec.in
+++ b/vdsm.spec.in
@@ -524,6 +524,7 @@
Requires: glusterfs-fuse
Requires: glusterfs-rdma
Requires: python-magic
+Requires: python-paramiko
%description gluster
Gluster plugin enables VDSM to serve Gluster functionalities.
diff --git a/vdsm/gluster/api.py b/vdsm/gluster/api.py
index 4bd8308..1d93150 100644
--- a/vdsm/gluster/api.py
+++ b/vdsm/gluster/api.py
@@ -19,11 +19,28 @@
#
from functools import wraps
+import socket
+import paramiko
+import logging
+import os
+import re
from vdsm.define import doneCode
import supervdsm as svdsm
+from vdsm.config import config
+from vdsm import utils
+import exception as ge
_SUCCESS = {'status': doneCode}
+_KEYFILE = config.get('vars', 'trust_store_path') + '/keys/vdsmkey.pem'
+_sshKeyGenCommandPath = utils.CommandPath("ssh-keygen",
+ "/usr/bin/ssh-keygen",
+ )
+_SSH_COPY_ID_CMD = 'umask 077 && mkdir -p ~/.ssh && ' \
+ 'cat >> ~/.ssh/authorized_keys && if test -x /sbin/restorecon; ' \
+ 'then /sbin/restorecon ~/.ssh ~/.ssh/authorized_keys >/dev/null 2>&1; ' \
+ 'else true; fi'
+paramiko.util.get_logger('paramiko').setLevel(logging.ERROR)
GLUSTER_RPM_PACKAGES = (
('glusterfs', 'glusterfs'),
@@ -59,6 +76,57 @@
wrapper.exportAsVerb = True
return wrapper
+
+
+class VolumeStatus():
+ ONLINE = 'ONLINE'
+ OFFLINE = 'OFFLINE'
+
+
+class HostKeyMatchException(paramiko.SSHException):
+ def __init__(self, hostname, fingerprint, expected_fingerprint):
+ self.err = 'Fingerprint %s of host %s does not match with %s' % \
+ (fingerprint, hostname, expected_fingerprint)
+ paramiko.SSHException.__init__(self, self.err)
+ self.hostname = hostname
+ self.fingerprint = fingerprint
+ self.expected_fingerprint = expected_fingerprint
+
+
+class HostKeyMatchPolicy(paramiko.AutoAddPolicy):
+ def __init__(self, expected_fingerprint):
+ self.expected_fingerprint = expected_fingerprint
+
+ def missing_host_key(self, client, hostname, key):
+ s = paramiko.util.hexlify(key.get_fingerprint())
+ fingerprint = ':'.join(re.findall('..', s))
+ if fingerprint.upper() == self.expected_fingerprint.upper():
+ paramiko.AutoAddPolicy.missing_host_key(self, client, hostname,
+ key)
+ else:
+ raise HostKeyMatchException(hostname, fingerprint,
+ self.expected_fingerprint)
+
+
+class GlusterSsh(paramiko.SSHClient):
+ def __init__(self, hostname, fingerprint, port=22, username=None,
+ password=None, pkey=None, key_filenames=[], timeout=None,
+ allow_agent=True, look_for_keys=True, compress=False):
+ paramiko.SSHClient.__init__(self)
+ key_file_list = []
+ if os.path.exists(_KEYFILE):
+ key_file_list.append(_KEYFILE)
+ key_file_list.extend(key_filenames)
+ self.set_missing_host_key_policy(HostKeyMatchPolicy(fingerprint))
+ try:
+ paramiko.SSHClient.connect(self, hostname, port, username,
+ password, pkey, key_file_list, timeout,
+ allow_agent, look_for_keys, compress)
+ except socket.error, e:
+ err = ['%s: %s' % (hostname, e)]
+ raise ge.GlusterSshConnectionFailedException(err=err)
+ except HostKeyMatchException, e:
+ raise ge.GlusterSshHostKeyMismatchException(err=[e.err])
class GlusterApi(object):
@@ -287,6 +355,57 @@
status = self.svdsmProxy.glusterServicesGet(serviceNames)
return {'services': status}
+ def _validateSshConnection(self, hostname, fingerprint, username):
+ try:
+ ssh = GlusterSsh(hostname,
+ fingerprint,
+ username=username)
+ ssh.close()
+ return True
+ except paramiko.SSHException, e:
+ raise ge.GlusterSshHostKeyAuthException(err=[str(e)])
+
+ @exportAsVerb
+ def validateSshConnection(self, hostname, fingerprint, username,
+ options=None):
+ self._validateSshConnection(hostname, fingerprint, username)
+
+ @exportAsVerb
+ def setupSshConnection(self, hostname, fingerprint, username, password,
+ options=None):
+ rc, out, err = utils.execCmd([_sshKeyGenCommandPath.cmd, '-y', '-f',
+ _KEYFILE])
+ if rc != 0:
+ raise ge.GlusterSshPubKeyGenerationFailedException(rc=rc, err=err)
+
+ try:
+ ssh = GlusterSsh(hostname,
+ fingerprint,
+ username=username,
+ password=password)
+ c = ssh.get_transport().open_session()
+ c.exec_command(_SSH_COPY_ID_CMD)
+ stdin = c.makefile('wb')
+ stdout = c.makefile('rb')
+ stderr = c.makefile_stderr('rb')
+ stdin.write('\n'.join(out) + '\n')
+ stdin.flush()
+ stdin.close()
+ c.shutdown_write()
+ rc = c.recv_exit_status()
+ out = stdout.read().splitlines()
+ err = stderr.read().splitlines()
+ c.close()
+ ssh.close()
+ if rc != 0:
+ raise ge.GlusterSshSetupExecFailedException(rc=rc,
+ out=out,
+ err=err)
+ except paramiko.AuthenticationException, e:
+ raise ge.GlusterSshHostAuthException(err=[str(e)])
+
+ self._validateSshConnection(hostname, fingerprint, username)
+
def getGlusterMethods(gluster):
l = []
diff --git a/vdsm/gluster/exception.py b/vdsm/gluster/exception.py
index c569a9e..c9a0548 100644
--- a/vdsm/gluster/exception.py
+++ b/vdsm/gluster/exception.py
@@ -484,3 +484,39 @@
prefix = "%s: " % (action)
self.message = prefix + "Service action is not supported"
self.err = [self.message]
+
+
+# Ssh
+class GlusterSshException(GlusterException):
+ code = 4500
+ message = "Gluster ssh exception"
+
+
+class GlusterSshConnectionFailedException(GlusterSshException):
+ code = 4501
+ message = "SSH connection failed"
+
+
+class GlusterSshHostKeyMismatchException(GlusterSshException):
+ code = 4502
+ message = "Host key match failed"
+
+
+class GlusterSshHostKeyAuthException(GlusterSshException):
+ code = 4503
+ message = "SSH host key authentication failed"
+
+
+class GlusterSshHostAuthException(GlusterSshException):
+ code = 4504
+ message = "SSH host authentication failed"
+
+
+class GlusterSshPubKeyGenerationFailedException(GlusterSshException):
+ code = 4505
+ message = "SSH public key generation failed"
+
+
+class GlusterSshSetupExecFailedException(GlusterSshException):
+ code = 4506
+ message = "SSH key setup execution failed"
--
To view, visit http://gerrit.ovirt.org/18355
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ia6f040b1343998de4f8e28419c63e380240368db
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Timothy Asir <tjeyasin(a)redhat.com>
Gerrit-Reviewer: Ayal Baron <abaron(a)redhat.com>
Gerrit-Reviewer: Bala.FA <barumuga(a)redhat.com>
Gerrit-Reviewer: Dan Kenigsberg <danken(a)redhat.com>
Gerrit-Reviewer: Saggi Mizrahi <smizrahi(a)redhat.com>
Change in vdsm[master]: keep pauseCode when migrating paused VM
by ahadas@redhat.com
Arik Hadas has uploaded a new change for review.
Change subject: keep pauseCode when migrating paused VM
......................................................................
keep pauseCode when migrating paused VM
This patch fixes a bug where the pauseCode is cleared on the destination
host when migrating a paused VM.
Change-Id: Iead0697bbebba3f261040221b04cd3745d8ef036
Signed-off-by: Arik Hadas <ahadas(a)redhat.com>
---
M vdsm/virt/vm.py
1 file changed, 4 insertions(+), 3 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/01/27801/1
diff --git a/vdsm/virt/vm.py b/vdsm/virt/vm.py
index 1bd9fed..8127402 100644
--- a/vdsm/virt/vm.py
+++ b/vdsm/virt/vm.py
@@ -1952,8 +1952,9 @@
self._ongoingCreations.release()
self.log.debug("_ongoingCreations released")
- if ('migrationDest' in self.conf or 'restoreState' in self.conf) \
- and self.lastStatus != vmstatus.DOWN:
+ migrating = 'migrationDest' in self.conf or \
+ 'restoreState' in self.conf
+ if migrating and self.lastStatus != vmstatus.DOWN:
self._waitForIncomingMigrationFinish()
self.lastStatus = vmstatus.UP
@@ -1961,7 +1962,7 @@
self.conf['pauseCode'] = self._initTimePauseCode
if self._initTimePauseCode == 'ENOSPC':
self.cont()
- else:
+ elif not migrating:
try:
with self._confLock:
del self.conf['pauseCode']
--
To view, visit http://gerrit.ovirt.org/27801
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Iead0697bbebba3f261040221b04cd3745d8ef036
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Arik Hadas <ahadas(a)redhat.com>
Change in vdsm[master]: do not use OOP for padding snapshot's memory volume
by ahadas@redhat.com
Arik Hadas has uploaded a new change for review.
Change subject: do not use OOP for padding snapshot's memory volume
......................................................................
do not use OOP for padding snapshot's memory volume
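For context, a rough sketch of the assumed semantics of
fileUtils.padToBlockSize (the 512-byte block size is an assumption for
illustration):

    import os

    def pad_to_block_size(path, block_size=512):
        # Extend the file with zeros so its size becomes a multiple
        # of block_size; a no-op if it already is.
        size = os.stat(path).st_size
        remainder = size % block_size
        if remainder:
            with open(path, 'ab') as f:
                f.write(b'\0' * (block_size - remainder))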
Change-Id: I2a94354e188019f3afd209633979ec5a5b35293b
Signed-off-by: Arik Hadas <ahadas(a)redhat.com>
---
M vdsm/virt/vm.py
1 file changed, 3 insertions(+), 9 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/38/26538/1
diff --git a/vdsm/virt/vm.py b/vdsm/virt/vm.py
index 6711bc6..b8ce533 100644
--- a/vdsm/virt/vm.py
+++ b/vdsm/virt/vm.py
@@ -3543,15 +3543,9 @@
'_srcDomXML': self._dom.XMLDesc(0),
'elapsedTimeOffset': time.time() - self._startTime}
- def _padMemoryVolume(memoryVolPath, sdUUID):
- sdType = sd.name2type(
- self.cif.irs.getStorageDomainInfo(sdUUID)['info']['type'])
- if sdType in sd.FILE_DOMAIN_TYPES:
- if sdType == sd.NFS_DOMAIN:
- oop.getProcessPool(sdUUID).fileUtils. \
- padToBlockSize(memoryVolPath)
- else:
- fileUtils.padToBlockSize(memoryVolPath)
+ def _padMemoryVolume(memoryVolPath):
+ if not utils.isBlockDevice(memoryVolPath):
+ fileUtils.padToBlockSize(memoryVolPath)
snap = xml.dom.minidom.Element('domainsnapshot')
disks = xml.dom.minidom.Element('disks')
--
To view, visit http://gerrit.ovirt.org/26538
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I2a94354e188019f3afd209633979ec5a5b35293b
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Arik Hadas <ahadas(a)redhat.com>
Change in vdsm[master]: clientIF: Remove unnecessary device is disk check
by sgotliv@redhat.com
Sergey Gotliv has uploaded a new change for review.
Change subject: clientIF: Remove unnecessary device is disk check
......................................................................
clientIF: Remove unnecessary device is disk check
Change-Id: I98317e805e6770df5dacd3237a383aaca78fde1e
Signed-off-by: Sergey Gotliv <sgotliv(a)redhat.com>
---
M vdsm/clientIF.py
1 file changed, 1 insertion(+), 1 deletion(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/63/22363/1
diff --git a/vdsm/clientIF.py b/vdsm/clientIF.py
index c083991..124f8e5 100644
--- a/vdsm/clientIF.py
+++ b/vdsm/clientIF.py
@@ -244,7 +244,7 @@
def prepareVolumePath(self, drive, vmId=None):
if type(drive) is dict:
# PDIV drive format
- if drive['device'] == 'disk' and vm.isVdsmImage(drive):
+ if vm.isVdsmImage(drive):
res = self.irs.prepareImage(
drive['domainID'], drive['poolID'],
drive['imageID'], drive['volumeID'])
--
To view, visit http://gerrit.ovirt.org/22363
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I98317e805e6770df5dacd3237a383aaca78fde1e
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Sergey Gotliv <sgotliv(a)redhat.com>
Change in vdsm[master]: Added gluster tag support in getAllTasks()
by barumuga@redhat.com
Hello Ayal Baron, Timothy Asir, Saggi Mizrahi, Federico Simoncelli, Dan Kenigsberg,
I'd like you to do a code review. Please visit
http://gerrit.ovirt.org/7579
to review the following change.
Change subject: Added gluster tag support in getAllTasks()
......................................................................
Added gluster tag support in getAllTasks()
If param tag is empty, all tasks including gluster tasks are returned;
otherwise, only tasks whose tags appear in the given tag list are returned.
As the verbs below are not consumed by engine/RHS-C yet, it is OK to defer
the compatibility issue for now.
glusterVolumeRebalanceStart
glusterVolumeRebalanceStatus
glusterVolumeReplaceBrickStart
glusterVolumeReplaceBrickStatus
glusterVolumeRemoveBrickStart
glusterVolumeRemoveBrickStatus
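As an illustration of the filtered call, the shape below follows the
subRes construction in the patch (IDs and counters invented):

    tasks = taskManager.getAllTasks(tag=['gluster'])
    # => {'12345473-9197-49d0-a877-831bc6e9dac2':
    #         {'id': '12345473-9197-49d0-a877-831bc6e9dac2',
    #          'verb': 'V1',          # volume name
    #          'state': 'RUNNING',
    #          'code': 'rebalance',   # task type
    #          'message': 'Files [scanned: 100, moved: 50, failed: 0], '
    #                     'Total size moved: 1024',
    #          'result': '',
    #          'tag': 'gluster'}}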
Change-Id: I9c765cbfebb5ba22f0d21efa04c824ea4daf6432
Signed-off-by: Bala.FA <barumuga(a)redhat.com>
---
M tests/gluster_cli_tests.py
M vdsm/gluster/cli.py
M vdsm/gluster/exception.py
M vdsm/storage/taskManager.py
4 files changed, 367 insertions(+), 95 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/79/7579/1
diff --git a/tests/gluster_cli_tests.py b/tests/gluster_cli_tests.py
index f442893..9c6357c 100644
--- a/tests/gluster_cli_tests.py
+++ b/tests/gluster_cli_tests.py
@@ -28,6 +28,7 @@
from gluster import cli as gcli
except ImportError:
pass
+import xml.etree.cElementTree as etree
class GlusterCliTests(TestCaseBase):
@@ -115,3 +116,74 @@
def test_parsePeerStatus(self):
self._parsePeerStatus_empty_test()
self._parsePeerStatus_test()
+
+ def _parseVolumeStatusAll_test(self):
+ out = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<cliOutput>
+ <opRet>0</opRet>
+ <opErrno>0</opErrno>
+ <opErrstr></opErrstr>
+ <volumes>
+ <volume>
+ <name>V1</name>
+ <id>03eace73-9197-49d0-a877-831bc6e9dac2</id>
+ <tasks>
+ <task>
+ <name>rebalance</name>
+ <id>12345473-9197-49d0-a877-831bc6e9dac2</id>
+ </task>
+ </tasks>
+ </volume>
+ <volume>
+ <name>V2</name>
+ <id>03eace73-1237-49d0-a877-831bc6e9dac2</id>
+ <tasks>
+ <task>
+ <name>replace-brick</name>
+ <id>12345473-1237-49d0-a877-831bc6e9dac2</id>
+ <sourceBrick>192.168.122.167:/tmp/V2-b1</sourceBrick>
+ <destBrick>192.168.122.168:/tmp/V2-b1</destBrick>
+ </task>
+ </tasks>
+ </volume>
+ <volume>
+ <name>V3</name>
+ <id>03eace73-1237-1230-a877-831bc6e9dac2</id>
+ <tasks>
+ <task>
+ <name>remove-brick</name>
+ <id>12345473-1237-1230-a877-831bc6e9dac2</id>
+ <BrickCount>2</BrickCount>
+ <brick>192.168.122.167:/tmp/V3-b1</brick>
+ <brick>192.168.122.168:/tmp/V3-b1</brick>
+ </task>
+ </tasks>
+ </volume>
+ </volumes>
+</cliOutput>"""
+ tree = etree.fromstring(out)
+ status = gcli._parseVolumeStatusAll(tree)
+ self.assertEquals(status,
+ {'12345473-1237-1230-a877-831bc6e9dac2':
+ {'bricks': ['192.168.122.167:/tmp/V3-b1',
+ '192.168.122.168:/tmp/V3-b1'],
+ 'taskType': 'remove-brick',
+ 'volumeId':
+ '03eace73-1237-1230-a877-831bc6e9dac2',
+ 'volumeName': 'V3'},
+ '12345473-1237-49d0-a877-831bc6e9dac2':
+ {'bricks': ['192.168.122.167:/tmp/V2-b1',
+ '192.168.122.168:/tmp/V2-b1'],
+ 'taskType': 'replace-brick',
+ 'volumeId':
+ '03eace73-1237-49d0-a877-831bc6e9dac2',
+ 'volumeName': 'V2'},
+ '12345473-9197-49d0-a877-831bc6e9dac2':
+ {'bricks': [],
+ 'taskType': 'rebalance',
+ 'volumeId':
+ '03eace73-9197-49d0-a877-831bc6e9dac2',
+ 'volumeName': 'V1'}})
+
+ def test_parseVolumeStatusAll(self):
+ self._parseVolumeStatusAll_test()
diff --git a/vdsm/gluster/cli.py b/vdsm/gluster/cli.py
index 95de106..1f464f6 100644
--- a/vdsm/gluster/cli.py
+++ b/vdsm/gluster/cli.py
@@ -84,6 +84,55 @@
raise ge.GlusterCmdFailedException(rc=rv, err=[msg])
+class TaskType:
+ REBALANCE = 'rebalance'
+ REPLACE_BRICK = 'replace-brick'
+ REMOVE_BRICK = 'remove-brick'
+
+
+def _parseVolumeStatusAll(tree):
+ """
+ returns {TaskId: {'volumeName': VolumeName,
+ 'volumeId': VolumeId,
+ 'taskType': TaskType,
+ 'bricks': BrickList}, ...}
+ """
+ tasks = {}
+ for el in tree.findall('volumes/volume'):
+ volumeName = el.find('name').text
+ volumeId = el.find('id').text
+ for c in el.findall('tasks/task'):
+ taskType = c.find('name').text
+ taskId = c.find('id').text
+ bricks = []
+ if taskType == TaskType.REPLACE_BRICK:
+ bricks.append(c.find('sourceBrick').text)
+ bricks.append(c.find('destBrick').text)
+ elif taskType == TaskType.REMOVE_BRICK:
+ for b in c.findall('brick'):
+ bricks.append(b.text)
+ elif taskType == TaskType.REBALANCE:
+ pass
+ tasks[taskId] = {'volumeName': volumeName,
+ 'volumeId': volumeId,
+ 'taskType': taskType,
+ 'bricks': bricks}
+ return tasks
+
+
+@exportToSuperVdsm
+def volumeStatusAll():
+ command = _getGlusterVolCmd() + ["status", "all"]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeStatusAllFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseVolumeStatusAll(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
+
+
def _parseVolumeInfo(out):
if not out[0].strip():
del out[0]
@@ -300,11 +349,15 @@
command.append("start")
if force:
command.append("force")
- rc, out, err = _execGluster(command)
- if rc:
- raise ge.GlusterVolumeRebalanceStartFailedException(rc, out, err)
- else:
- return True
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeRebalanceStartFailedException(rc=e.rc,
+ err=e.err)
+ try:
+ return {'taskId': xmltree.find('id').text}
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
@exportToSuperVdsm
@@ -312,84 +365,147 @@
command = _getGlusterVolCmd() + ["rebalance", volumeName, "stop"]
if force:
command.append('force')
- rc, out, err = _execGluster(command)
- if rc:
- raise ge.GlusterVolumeRebalanceStopFailedException(rc, out, err)
- else:
+ try:
+ _execGlusterXml(command)
return True
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeRebalanceStopFailedException(rc=e.rc,
+ err=e.err)
+
+
+class TaskStatus():
+ RUNNING = 'RUNNING'
+ FAILED = 'FAILED'
+ COMPLETED = 'COMPLETED'
+
+
+def _parseVolumeRebalanceRemoveBrickStatus(xmltree, mode):
+ """
+ returns {'taskId': UUID,
+ 'host': [{'name': NAME,
+ 'id': HOSTID,
+ 'filesScanned': INT,
+ 'filesMoved': INT,
+ 'filesFailed': INT,
+ 'totalSizeMoved': INT,
+ 'status': TaskStatus},...]
+ 'summary': {'filesScanned': INT,
+ 'filesMoved': INT,
+ 'filesFailed': INT,
+ 'totalSizeMoved': INT,
+ 'status': TaskStatus}}
+ """
+ if mode == 'rebalance':
+ tree = xmltree.find('volRebalance')
+ elif mode == 'remove-brick':
+ tree = xmltree.find('volRemoveBrick')
+ else:
+ return
+ status = \
+ {'taskId': tree.find('id').text,
+ 'summary': \
+ {'filesScanned': int(tree.find('summary/filesScanned').text),
+ 'filesMoved': int(tree.find('summary/filesMoved').text),
+ 'filesFailed': int(tree.find('summary/filesFailed').text),
+ 'totalSizeMoved': int(tree.find('summary/totalSizeMoved').text),
+ 'status': tree.find('summary/status').text},
+ 'host': []}
+ for el in tree.findall('node'):
+ status['host'].append({'name': el.find('name').text,
+ 'id': el.find('id').text,
+ 'filesScanned':
+ int(el.find('filesScanned').text),
+ 'filesMoved': int(el.find('filesMoved').text),
+ 'filesFailed': int(el.find('filesFailed').text),
+ 'totalSizeMoved':
+ int(el.find('totalSizeMoved').text),
+ 'status': el.find('status').text})
+ return status
+
+
+def _parseVolumeRebalanceStatus(tree):
+ return _parseVolumeRebalanceRemoveBrickStatus(tree, 'rebalance')
@exportToSuperVdsm
def volumeRebalanceStatus(volumeName):
- rc, out, err = _execGluster(_getGlusterVolCmd() + ["rebalance", volumeName,
- "status"])
- if rc:
- raise ge.GlusterVolumeRebalanceStatusFailedException(rc, out, err)
- if 'in progress' in out[0]:
- return BrickStatus.RUNNING, "\n".join(out)
- elif 'complete' in out[0]:
- return BrickStatus.COMPLETED, "\n".join(out)
- else:
- return BrickStatus.UNKNOWN, "\n".join(out)
+ command = _getGlusterVolCmd() + ["rebalance", volumeName, "status"]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeRebalanceStatusFailedException(rc=e.rc,
+ err=e.err)
+ try:
+ return _parseVolumeRebalanceStatus(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
@exportToSuperVdsm
def volumeReplaceBrickStart(volumeName, existingBrick, newBrick):
- rc, out, err = _execGluster(_getGlusterVolCmd() + ["replace-brick",
- volumeName,
- existingBrick, newBrick,
- "start"])
- if rc:
- raise ge.GlusterVolumeReplaceBrickStartFailedException(rc, out, err)
- else:
- return True
+ command = _getGlusterVolCmd() + ["replace-brick", volumeName,
+ existingBrick, newBrick, "start"]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeReplaceBrickStartFailedException(rc=e.rc,
+ err=e.err)
+ try:
+ return {'taskId': xmltree.find('id').text}
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
@exportToSuperVdsm
def volumeReplaceBrickAbort(volumeName, existingBrick, newBrick):
- rc, out, err = _execGluster(_getGlusterVolCmd() + ["replace-brick",
- volumeName,
- existingBrick, newBrick,
- "abort"])
- if rc:
- raise ge.GlusterVolumeReplaceBrickAbortFailedException(rc, out, err)
- else:
+ command = _getGlusterVolCmd() + ["replace-brick", volumeName,
+ existingBrick, newBrick, "abort"]
+ try:
+ _execGlusterXml(command)
return True
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeReplaceBrickAbortFailedException(rc=e.rc,
+ err=e.err)
@exportToSuperVdsm
def volumeReplaceBrickPause(volumeName, existingBrick, newBrick):
- rc, out, err = _execGluster(_getGlusterVolCmd() + ["replace-brick",
- volumeName,
- existingBrick, newBrick,
- "pause"])
- if rc:
- raise ge.GlusterVolumeReplaceBrickPauseFailedException(rc, out, err)
- else:
+ command = _getGlusterVolCmd() + ["replace-brick", volumeName,
+ existingBrick, newBrick, "pause"]
+ try:
+ _execGlusterXml(command)
return True
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeReplaceBrickPauseFailedException(rc=e.rc,
+ err=e.err)
+
+
+def _parseVolumeReplaceBrickStatus(tree):
+ """
+ returns {'taskId': UUID,
+ 'filesMoved': INT,
+ 'movingFile': STRING,
+ 'status': TaskStatus}}
+ """
+ return {'taskId': tree.find('volReplaceBrick/id').text,
+ 'filesMoved': int(tree.find('volReplaceBrick/filesMoved').text),
+ 'movingFile': tree.find('volReplaceBrick/movingFile').text,
+ 'status': tree.find('volReplaceBrick/status').text}
@exportToSuperVdsm
def volumeReplaceBrickStatus(volumeName, existingBrick, newBrick):
- rc, out, err = _execGluster(_getGlusterVolCmd() + ["replace-brick",
- volumeName,
- existingBrick, newBrick,
- "status"])
- if rc:
- raise ge.GlusterVolumeReplaceBrickStatusFailedException(rc, out,
- err)
- message = "\n".join(out)
- statLine = out[0].strip().upper()
- if BrickStatus.PAUSED in statLine:
- return BrickStatus.PAUSED, message
- elif statLine.endswith('MIGRATION COMPLETE'):
- return BrickStatus.COMPLETED, message
- elif statLine.startswith('NUMBER OF FILES MIGRATED'):
- return BrickStatus.RUNNING, message
- elif statLine.endswith("UNKNOWN"):
- return BrickStatus.UNKNOWN, message
- else:
- return BrickStatus.NA, message
+ command = _getGlusterVolCmd() + ["replace-brick", volumeName,
+ existingBrick, newBrick, "status"]
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeReplaceBrickStatusFailedException(rc=e.rc,
+ err=e.err)
+ try:
+ return _parseVolumeReplaceBrickStatus(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
@exportToSuperVdsm
@@ -399,12 +515,12 @@
existingBrick, newBrick, "commit"]
if force:
command.append('force')
- rc, out, err = _execGluster(command)
- if rc:
- raise ge.GlusterVolumeReplaceBrickCommitFailedException(rc, out,
- err)
- else:
+ try:
+ _execGlusterXml(command)
return True
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeReplaceBrickCommitFailedException(rc=e.rc,
+ err=e.err)
@exportToSuperVdsm
@@ -413,12 +529,15 @@
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["start"]
-
- rc, out, err = _execGluster(command)
- if rc:
- raise ge.GlusterVolumeRemoveBrickStartFailedException(rc, out, err)
- else:
- return True
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeRemoveBrickStartFailedException(rc=e.rc,
+ err=e.err)
+ try:
+ return {'taskId': xmltree.find('id').text}
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
@exportToSuperVdsm
@@ -427,12 +546,16 @@
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["stop"]
- rc, out, err = _execGluster(command)
-
- if rc:
- raise ge.GlusterVolumeRemoveBrickStopFailedException(rc, out, err)
- else:
+ try:
+ _execGlusterXml(command)
return True
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeRemoveBrickStopFailedException(rc=e.rc,
+ err=e.err)
+
+
+def _parseVolumeRemoveBrickStatus(tree):
+ return _parseVolumeRebalanceRemoveBrickStatus(tree, 'remove-brick')
@exportToSuperVdsm
@@ -441,12 +564,15 @@
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["status"]
- rc, out, err = _execGluster(command)
-
- if rc:
- raise ge.GlusterVolumeRemoveBrickStatusFailedException(rc, out, err)
- else:
- return "\n".join(out)
+ try:
+ xmltree, out = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeRemoveBrickStatusFailedException(rc=e.rc,
+ err=e.err)
+ try:
+ return _parseVolumeRemoveBrickStatus(xmltree)
+ except:
+ raise ge.GlusterXmlErrorException(err=out)
@exportToSuperVdsm
@@ -455,12 +581,12 @@
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["commit"]
- rc, out, err = _execGluster(command)
-
- if rc:
- raise ge.GlusterVolumeRemoveBrickCommitFailedException(rc, out, err)
- else:
+ try:
+ _execGlusterXml(command)
return True
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeRemoveBrickCommitFailedException(rc=e.rc,
+ err=e.err)
@exportToSuperVdsm
@@ -469,12 +595,12 @@
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["force"]
- rc, out, err = _execGluster(command)
-
- if rc:
- raise ge.GlusterVolumeRemoveBrickForceFailedException(rc, out, err)
- else:
+ try:
+ _execGlusterXml(command)
return True
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeRemoveBrickForceFailedException(rc=e.rc,
+ err=e.err)
@exportToSuperVdsm
diff --git a/vdsm/gluster/exception.py b/vdsm/gluster/exception.py
index f4f497b..f209885 100644
--- a/vdsm/gluster/exception.py
+++ b/vdsm/gluster/exception.py
@@ -323,6 +323,11 @@
message = "Volume remove brick force failed"
+class GlusterVolumeStatusAllFailedException(GlusterVolumeException):
+ code = 4158
+ message = "Volume status all failed"
+
+
# Host
class GlusterHostException(GlusterException):
code = 4400
diff --git a/vdsm/storage/taskManager.py b/vdsm/storage/taskManager.py
index 3bc12f3..0a269cd 100644
--- a/vdsm/storage/taskManager.py
+++ b/vdsm/storage/taskManager.py
@@ -25,6 +25,12 @@
import storage_exception as se
from task import Task, Job, TaskCleanType
from threadPool import ThreadPool
+try:
+ from gluster import cli as gcli
+ from gluster import exception as ge
+ _glusterEnabled = True
+except ImportError:
+ _glusterEnabled = False
class TaskManager:
@@ -113,19 +119,82 @@
self.log.debug("Return: %s", subRes)
return subRes
- def getAllTasks(self):
+ def _getAllGlusterTasks(self):
"""
- Return Tasks for all public tasks.
+ Return all gluster tasks
+ """
+ subRes = {}
+ if not _glusterEnabled:
+ return subRes
+
+ for taskId, value in gcli.volumeStatusAll().iteritems():
+ msg = ''
+ state = ''
+ try:
+ if value['taskType'] == gcli.TaskType.REBALANCE:
+ status = gcli.volumeRebalanceStatus(value['volumeName'])
+ msg = ('Files [scanned: %d, moved: %d, failed: %d], '
+ 'Total size moved: %d') % \
+ (status['summary']['filesScanned'],
+ status['summary']['filesMoved'],
+ status['summary']['filesFailed'],
+ status['summary']['totalSizeMoved'])
+ state = status['summary']['status']
+ elif value['taskType'] == gcli.TaskType.REMOVE_BRICK:
+ status = gcli.volumeRemoveBrickStatus(value['volumeName'],
+ value['bricks'])
+ msg = ('Files [scanned: %d, moved: %d, failed: %d], '
+ 'Total size moved: %d') % \
+ (status['summary']['filesScanned'],
+ status['summary']['filesMoved'],
+ status['summary']['filesFailed'],
+ status['summary']['totalSizeMoved'])
+ state = status['summary']['status']
+ elif value['taskType'] == gcli.TaskType.REPLACE_BRICK:
+ status = gcli.volumeReplaceBrickStatus(value['volumeName'],
+ value['bricks'][0],
+ value['bricks'][1])
+ msg = 'Files moved: %d, Moving file: %s' % \
+ (status['filesMoved'], status['movingFile'])
+ state = status['status']
+ except ge.GlusterException:
+ self.log.error("gluster exception occured", exc_info=True)
+
+ subRes[taskId] = {"id": taskId,
+ "verb": value['volumeName'],
+ "state": state,
+ "code": value['taskType'],
+ "message": msg,
+ "result": '',
+ "tag": 'gluster'}
+ return subRes
+
+ def getAllTasks(self, tag=[]):
+ """
+ Return Tasks for all public tasks if param tag is empty,
+ else return tasks those tags are in param tag.
"""
self.log.debug("Entry.")
subRes = {}
for taskID, task in self._tasks.items():
try:
- subRes[taskID] = task.getDetails()
+ if not tag:
+ subRes[taskID] = task.getDetails()
+ elif task.getTags() in tag:
+ subRes[taskID] = task.getDetails()
except se.UnknownTask:
# Return info for existing tasks only.
self.log.warn("Unknown task %s. Maybe task was already "
"cleared.", taskID)
+
+ try:
+ if not tag:
+ subRes.update(self._getAllGlusterTasks())
+ elif 'gluster' in tag:
+ subRes.update(self._getAllGlusterTasks())
+ except ge.GlusterException:
+ self.log.error("gluster exception occured", exc_info=True)
+
self.log.debug("Return: %s", subRes)
return subRes
--
To view, visit http://gerrit.ovirt.org/7579
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I9c765cbfebb5ba22f0d21efa04c824ea4daf6432
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Bala.FA <barumuga(a)redhat.com>
Gerrit-Reviewer: Ayal Baron <abaron(a)redhat.com>
Gerrit-Reviewer: Dan Kenigsberg <danken(a)redhat.com>
Gerrit-Reviewer: Federico Simoncelli <fsimonce(a)redhat.com>
Gerrit-Reviewer: Saggi Mizrahi <smizrahi(a)redhat.com>
Gerrit-Reviewer: Timothy Asir <tjeyasin(a)redhat.com>
Change in vdsm[master]: tests: Fix AttributeError: 'module' object has no attribute ...
by osvoboda@redhat.com
Ondřej Svoboda has uploaded a new change for review.
Change subject: tests: Fix AttributeError: 'module' object has no attribute 'ifcfg'
......................................................................
tests: Fix AttributeError: 'module' object has no attribute 'ifcfg'
This allows running NOSE_EXCLUDE='.*' make rpm on Fedora 20 again.
Change-Id: Ibe715c9f4c0f6b2b832f897467ba551416c52432
Signed-off-by: Ondřej Svoboda <osvoboda(a)redhat.com>
---
M tests/configNetworkTests.py
1 file changed, 4 insertions(+), 5 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/08/31108/1
diff --git a/tests/configNetworkTests.py b/tests/configNetworkTests.py
index 83a4b73..afeaa65 100644
--- a/tests/configNetworkTests.py
+++ b/tests/configNetworkTests.py
@@ -25,8 +25,9 @@
from testlib import VdsmTestCase as TestCaseBase
from monkeypatch import MonkeyPatch
-from network import api, configurators
+from network import api
from network import errors
+from network.configurators import ifcfg
from network.models import Bond, Bridge, Nic, Vlan
@@ -51,10 +52,8 @@
@MonkeyPatch(netinfo, 'networks', _fakeNetworks)
@MonkeyPatch(netinfo, 'getMaxMtu', lambda *x: 1500)
@MonkeyPatch(netinfo, 'getMtu', lambda *x: 1500)
- @MonkeyPatch(configurators.ifcfg, 'ifdown', lambda *x:
- _raiseInvalidOpException())
- @MonkeyPatch(configurators.ifcfg, 'ifup',
- lambda *x: _raiseInvalidOpException())
+ @MonkeyPatch(ifcfg, 'ifdown', lambda *x: _raiseInvalidOpException())
+ @MonkeyPatch(ifcfg, 'ifup', lambda *x: _raiseInvalidOpException())
@MonkeyPatch(Bond, 'configure', lambda *x: _raiseInvalidOpException())
@MonkeyPatch(Bridge, 'configure', lambda *x: _raiseInvalidOpException())
@MonkeyPatch(Nic, 'configure', lambda *x: _raiseInvalidOpException())
--
To view, visit http://gerrit.ovirt.org/31108
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ibe715c9f4c0f6b2b832f897467ba551416c52432
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Ondřej Svoboda <osvoboda(a)redhat.com>
Change in vdsm[master]: qemuimg: add support for convert progress
by Federico Simoncelli
Federico Simoncelli has uploaded a new change for review.
Change subject: qemuimg: add support for convert progress
......................................................................
qemuimg: add support for convert progress
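The heart of the change is parsing the progress line that qemu-img prints
with -p; a minimal standalone sketch of that parsing, using the same
regular expression as the patch (the sample input is invented):

    import re

    PROGRESS = re.compile(r'\(([\d.]+)/100%\)')

    def last_progress(buf):
        # qemu-img rewrites one status line terminated by '\r',
        # e.g. '    (42.42/100%)\r'; keep the last complete update.
        value = None
        for line in buf.split('\r')[:-1]:
            m = PROGRESS.search(line.strip())
            if m:
                value = float(m.group(1))
        return value

    last_progress('    (10.00/100%)\r    (25.50/100%)\r')  # => 25.5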
Change-Id: Id0b53e418c62bb2e91444ba5f351c916ca417299
Signed-off-by: Federico Simoncelli <fsimonce(a)redhat.com>
---
M lib/vdsm/qemuimg.py
M tests/qemuimgTests.py
M vdsm/storage/image.py
3 files changed, 172 insertions(+), 58 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/10/33910/1
diff --git a/lib/vdsm/qemuimg.py b/lib/vdsm/qemuimg.py
index 39cd394..9ca9c58 100644
--- a/lib/vdsm/qemuimg.py
+++ b/lib/vdsm/qemuimg.py
@@ -160,9 +160,9 @@
return check
-def convert(srcImage, dstImage, stop, srcFormat=None, dstFormat=None,
+def convert(srcImage, dstImage, srcFormat=None, dstFormat=None,
backing=None, backingFormat=None):
- cmd = [_qemuimg.cmd, "convert", "-t", "none"]
+ cmd = [_qemuimg.cmd, "convert", "-p", "-t", "none"]
options = []
cwdPath = None
@@ -190,14 +190,66 @@
cmd.append(dstImage)
- (rc, out, err) = utils.watchCmd(
- cmd, cwd=cwdPath, stop=stop, nice=utils.NICENESS.HIGH,
- ioclass=utils.IOCLASS.IDLE)
+ return QemuImgProcess(cmd, cwd=cwdPath)
- if rc != 0:
- raise QImgError(rc, out, err)
- return (rc, out, err)
+class QemuImgProcess(object):
+ REGEXPR = re.compile(r'\(([\d.]+)/100%\)')
+
+ def __init__(self, cmd, cwd=None):
+ self.progress = 0.0
+
+ self._stdout = bytearray()
+ self._stderr = bytearray()
+
+ cmd = utils.ionice_cmd(cmd, utils.IOCLASS.IDLE)
+ cmd = utils.nice_cmd(cmd, utils.NICENESS.HIGH)
+
+ self.execution = utils.CommandStream(
+ cmd, self._recvstdout, self._recvstderr, cwd=cwd,
+ deathSignal=signal.SIGKILL)
+
+ def _recvstderr(self, buffer):
+ self._stderr += buffer
+
+ def _recvstdout(self, buffer):
+ last_progress = None
+ self._stdout += buffer
+
+ while True:
+ try:
+ idx = self._stdout.index('\r')
+ except ValueError:
+ break
+
+ last_progress = self._stdout[:idx]
+ del self._stdout[:idx + 1]
+
+ if last_progress:
+ m = self.REGEXPR.match(last_progress.strip())
+ if m is None:
+ raise ValueError(
+ 'Unable to parse: "%s"' % last_progress)
+ self.progress = float(m.group(1))
+
+ @property
+ def stderr(self):
+ return str(self._stderr)
+
+ def wait(self, timeout=None):
+ returncode = self.execution.wait(timeout=timeout)
+
+ if (self.execution.returncode is not None
+ and self.execution.returncode != 0):
+ raise QImgError(returncode, "", self.stderr)
+
+ return returncode
+
+ def terminate(self):
+ self.execution.terminate()
+
+ def kill(self):
+ self.execution.kill()
def resize(image, newSize, format=None):
diff --git a/tests/qemuimgTests.py b/tests/qemuimgTests.py
index abc0750..9e61de0 100644
--- a/tests/qemuimgTests.py
+++ b/tests/qemuimgTests.py
@@ -163,12 +163,11 @@
def test_no_format(self):
def convert(cmd, **kw):
- expected = [QEMU_IMG, 'convert', '-t', 'none', 'src', 'dst']
+ expected = [QEMU_IMG, 'convert', '-p', '-t', 'none', 'src', 'dst']
self.assertEqual(cmd, expected)
- return 0, '', ''
- with FakeCmd(utils, 'watchCmd', convert):
- qemuimg.convert('src', 'dst', True)
+ with FakeCmd(qemuimg, 'QemuImgProcess', convert):
+ qemuimg.convert('src', 'dst')
def test_qcow2_compat_unsupported(self):
def qcow2_compat_unsupported(cmd, **kw):
@@ -176,14 +175,13 @@
return 0, 'Supported options:\nsize ...\n', ''
def convert(cmd, **kw):
- expected = [QEMU_IMG, 'convert', '-t', 'none', 'src', '-O',
+ expected = [QEMU_IMG, 'convert', '-p', '-t', 'none', 'src', '-O',
'qcow2', 'dst']
self.assertEqual(cmd, expected)
- return 0, '', ''
with FakeCmd(utils, 'execCmd', qcow2_compat_unsupported):
- with FakeCmd(utils, 'watchCmd', convert):
- qemuimg.convert('src', 'dst', True, dstFormat='qcow2')
+ with FakeCmd(qemuimg, 'QemuImgProcess', convert):
+ qemuimg.convert('src', 'dst', dstFormat='qcow2')
def qcow2_compat_supported(self, cmd, **kw):
self.check_supports_qcow2_compat(cmd, **kw)
@@ -191,14 +189,13 @@
def test_qcow2_compat_supported(self):
def convert(cmd, **kw):
- expected = [QEMU_IMG, 'convert', '-t', 'none', 'src', '-O',
+ expected = [QEMU_IMG, 'convert', '-p', '-t', 'none', 'src', '-O',
'qcow2', '-o', 'compat=0.10', 'dst']
self.assertEqual(cmd, expected)
- return 0, '', ''
with FakeCmd(utils, 'execCmd', self.qcow2_compat_supported):
- with FakeCmd(utils, 'watchCmd', convert):
- qemuimg.convert('src', 'dst', True, dstFormat='qcow2')
+ with FakeCmd(qemuimg, 'QemuImgProcess', convert):
+ qemuimg.convert('src', 'dst', dstFormat='qcow2')
def check_supports_qcow2_compat(self, cmd, **kw):
expected = [QEMU_IMG, 'convert', '-O', 'qcow2', '-o', '?', '/dev/null',
@@ -207,49 +204,95 @@
def test_qcow2_no_backing_file(self):
def qcow2_no_backing_file(cmd, **kw):
- expected = [QEMU_IMG, 'convert', '-t', 'none', 'source', '-O',
- 'qcow2', '-o', 'compat=0.10', 'target']
+ expected = [QEMU_IMG, 'convert', '-p', '-t', 'none', 'source',
+ '-O', 'qcow2', '-o', 'compat=0.10', 'target']
self.assertEqual(cmd, expected)
- return 0, '', ''
with FakeCmd(utils, 'execCmd', self.qcow2_compat_supported):
- with FakeCmd(utils, 'watchCmd', qcow2_no_backing_file):
- qemuimg.convert('source', 'target', None, dstFormat='qcow2')
+ with FakeCmd(qemuimg, 'QemuImgProcess', qcow2_no_backing_file):
+ qemuimg.convert('source', 'target', dstFormat='qcow2')
def test_qcow2_backing_file(self):
def qcow2_backing_file(cmd, **kw):
- expected = [QEMU_IMG, 'convert', '-t', 'none', 'source', '-O',
- 'qcow2', '-o', 'compat=0.10,backing_file=backing',
- 'target']
+ expected = [QEMU_IMG, 'convert', '-p', '-t', 'none', 'source',
+ '-O', 'qcow2',
+ '-o', 'compat=0.10,backing_file=backing', 'target']
self.assertEqual(cmd, expected)
- return 0, '', ''
with FakeCmd(utils, 'execCmd', self.qcow2_compat_supported):
- with FakeCmd(utils, 'watchCmd', qcow2_backing_file):
- qemuimg.convert('source', 'target', None, dstFormat='qcow2',
+ with FakeCmd(qemuimg, 'QemuImgProcess', qcow2_backing_file):
+ qemuimg.convert('source', 'target', dstFormat='qcow2',
backing='backing')
def test_qcow2_backing_format(self):
def qcow2_backing_format(cmd, **kw):
- expected = [QEMU_IMG, 'convert', '-t', 'none', 'source', '-O',
- 'qcow2', '-o', 'compat=0.10', 'target']
+ expected = [QEMU_IMG, 'convert', '-p', '-t', 'none', 'source',
+ '-O', 'qcow2', '-o', 'compat=0.10', 'target']
self.assertEqual(cmd, expected)
- return 0, '', ''
with FakeCmd(utils, 'execCmd', self.qcow2_compat_supported):
- with FakeCmd(utils, 'watchCmd', qcow2_backing_format):
- qemuimg.convert('source', 'target', None, dstFormat='qcow2',
+ with FakeCmd(qemuimg, 'QemuImgProcess', qcow2_backing_format):
+ qemuimg.convert('source', 'target', dstFormat='qcow2',
backingFormat='qcow2')
def test_qcow2_backing_file_and_format(self):
def qcow2_backing_format(cmd, **kw):
- expected = [QEMU_IMG, 'convert', '-t', 'none', 'source', '-O',
- 'qcow2', '-o', 'compat=0.10,backing_file=backing,'
+ expected = [QEMU_IMG, 'convert', '-p', '-t', 'none', 'source',
+ '-O', 'qcow2',
+ '-o', 'compat=0.10,backing_file=backing,'
'backing_fmt=qcow2', 'target']
self.assertEqual(cmd, expected)
- return 0, '', ''
with FakeCmd(utils, 'execCmd', self.qcow2_compat_supported):
- with FakeCmd(utils, 'watchCmd', qcow2_backing_format):
- qemuimg.convert('source', 'target', None, dstFormat='qcow2',
+ with FakeCmd(qemuimg, 'QemuImgProcess', qcow2_backing_format):
+ qemuimg.convert('source', 'target', dstFormat='qcow2',
backing='backing', backingFormat='qcow2')
+
+
+class QemuImgProcessTests(TestCaseBase):
+ PROGRESS_FORMAT = " (%.2f/100%%)\r"
+
+ @staticmethod
+ def _progress_iterator():
+ return map(lambda x: x / 100.0, xrange(0, 10000, 1))
+
+ def test_progress_simple(self):
+ p = qemuimg.QemuImgProcess([])
+
+ for progress in self._progress_iterator():
+ p._recvstdout(self.PROGRESS_FORMAT % progress)
+ self.assertEquals(p.progress, progress)
+
+ self.assertEquals(p.wait(), 0)
+
+ def test_progress_incomplete(self):
+ p = qemuimg.QemuImgProcess([])
+
+ for progress in self._progress_iterator():
+ stdout = self.PROGRESS_FORMAT % progress
+ p._recvstdout(stdout[:12])
+ p._recvstdout(stdout[12:])
+ self.assertEquals(p.progress, progress)
+
+ self.assertEquals(p.wait(), 0)
+
+ def test_progress_batch(self):
+ p = qemuimg.QemuImgProcess([])
+
+ p._recvstdout(
+ (self.PROGRESS_FORMAT % 10.00) +
+ (self.PROGRESS_FORMAT % 25.00) +
+ (self.PROGRESS_FORMAT % 33.33))
+
+ self.assertEquals(p.progress, 33.33)
+ self.assertEquals(p.wait(), 0)
+
+ def test_unexpected_output(self):
+ p = qemuimg.QemuImgProcess([])
+
+ self.assertRaises(ValueError, p._recvstdout, "Hello World\r")
+
+ p._recvstdout("Hello ")
+ self.assertRaises(ValueError, p._recvstdout, "World\r")
+
+ self.assertEquals(p.wait(), 0)
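For readers following these tests, here is a self-contained sketch of the
parsing behaviour they exercise (an illustration only, not the patch's
actual QemuImgProcess): qemu-img run with -p emits carriage-return
terminated updates such as " (42.00/100%)\r"; updates may arrive split or
batched arbitrarily, and the last complete update wins:

    import re

    PROGRESS_RE = re.compile(r'\s*\((\d+\.\d+)/100%\)$')

    class ProgressParser(object):
        def __init__(self):
            self._buffer = ''
            self.progress = 0.0

        def feed(self, data):
            self._buffer += data
            chunks = self._buffer.split('\r')
            self._buffer = chunks.pop()   # keep any trailing partial update
            for chunk in chunks:
                match = PROGRESS_RE.match(chunk)
                if match is None:
                    raise ValueError('unexpected output: %r' % chunk)
                self.progress = float(match.group(1))

    parser = ProgressParser()
    parser.feed(' (10.00/100%)\r (25.0')   # split mid-update, as in
    parser.feed('0/100%)\r')               # test_progress_incomplete
    assert parser.progress == 25.0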
diff --git a/vdsm/storage/image.py b/vdsm/storage/image.py
index 41e3f30..cdcd82a 100644
--- a/vdsm/storage/image.py
+++ b/vdsm/storage/image.py
@@ -93,6 +93,7 @@
"""
log = logging.getLogger('Storage.Image')
_fakeTemplateLock = threading.Lock()
+ _QEMU_LOGGING_INTERVAL = 60.0
@classmethod
def createImageRollback(cls, taskObj, imageDir):
@@ -109,6 +110,24 @@
def __init__(self, repoPath):
self.repoPath = repoPath
+
+ def qemuImgConvert(self, *args, **kwargs):
+ self.log.debug('starting qemu-img operation')
+ command = qemuimg.convert(*args, **kwargs)
+
+ def abortImgConversion():
+ self.log.info('aborting ongoing qemu-img operation')
+ command.terminate()
+
+ retcode = None
+
+ with vars.task.abort_callback(abortImgConversion):
+ while retcode is None:
+ retcode = command.wait(self._QEMU_LOGGING_INTERVAL)
+ self.log.debug('qemu-img operation progress: %s%%',
+ command.progress)
+
+ self.log.debug('qemu-img operation has completed: %s', retcode)
def create(self, sdUUID, imgUUID):
"""Create placeholder for image's volumes
@@ -444,13 +463,12 @@
backingFormat = None
self.log.debug("start qemu convert")
- qemuimg.convert(srcVol.getVolumePath(),
- dstVol.getVolumePath(),
- vars.task.aborting,
- srcFormat=srcFormat,
- dstFormat=dstFormat,
- backing=backing,
- backingFormat=backingFormat)
+ self.qemuImgConvert(srcVol.getVolumePath(),
+ dstVol.getVolumePath(),
+ srcFormat=srcFormat,
+ dstFormat=dstFormat,
+ backing=backing,
+ backingFormat=backingFormat)
except ActionStopped:
raise
except se.StorageException:
@@ -830,10 +848,11 @@
dstVol.prepare(rw=True, setrw=True)
try:
- qemuimg.convert(volParams['path'], dstPath,
- vars.task.aborting,
- volume.fmt2str(volParams['volFormat']),
- volume.fmt2str(dstVolFormat))
+ self.qemuImgConvert(
+ volParams['path'],
+ dstPath,
+ srcFormat=volume.fmt2str(volParams['volFormat']),
+ dstFormat=volume.fmt2str(dstVolFormat))
except ActionStopped:
raise
except qemuimg.QImgError:
@@ -1045,7 +1064,7 @@
# volume and rebase successor's children (if any) on top of it.
# Step 1: Create an empty volume named successor_MERGE similar to
# ancestor volume.
- # Step 2: qemuimg.convert successor -> sucessor_MERGE
+ # Step 2: qemuImgConvert successor -> successor_MERGE
# Step 3: Rename successor to _remove_me__successor
# Step 4: Rename successor_MERGE to successor
# Step 5: Unsafely rebase successor's children on top of temporary
@@ -1071,11 +1090,11 @@
# Step 2: Convert successor to new volume
# qemu-img convert -f qcow2 successor -O raw newUUID
try:
- qemuimg.convert(srcVolParams['path'],
- newVol.getVolumePath(),
- vars.task.aborting,
- volume.fmt2str(srcVolParams['volFormat']),
- volume.fmt2str(volParams['volFormat']))
+ self.qemuImgConvert(
+ srcVolParams['path'],
+ newVol.getVolumePath(),
+ srcFormat=volume.fmt2str(srcVolParams['volFormat']),
+ dstFormat=volume.fmt2str(volParams['volFormat']))
except qemuimg.QImgError:
self.log.exception('conversion failure for volume %s',
srcVol.volUUID)
--
To view, visit http://gerrit.ovirt.org/33910
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Id0b53e418c62bb2e91444ba5f351c916ca417299
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Federico Simoncelli <fsimonce(a)redhat.com>
Change in vdsm[master]: vm: Modify and save state after hotunplugging the disk
by xfrancis@redhat.com
Xavi Francisco has uploaded a new change for review.
Change subject: vm: Modify and save state after hotunplugging the disk
......................................................................
vm: Modify and save state after hotunplugging the disk
This patch changes the order in which the hotunplug process is
executed. Currently, when the hotunplug command is called, the disk is
first removed from the internal state, and only afterwards is the
libvirt hotunplug command executed. The problem with that approach is
that if VDSM restarts between the modification of the internal state
and the actual unplugging of the disk, the disk never gets unplugged,
leaving the system in an inconsistent state.
This patch solves the issue by modifying the internal state only after
the unplugging has actually succeeded. The state is then saved
unconditionally, so whether or not the disk was unplugged, VDSM can
recover a consistent state after a restart.
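To make the intended ordering concrete, here is a minimal sketch
(hypothetical names only, not the actual vm.py API): perform the
failure-prone libvirt call first, mutate internal state only when it
succeeds, and persist state in a finally block either way:

    def hotunplug_disk(drive, detach_device, devices, save_state):
        # detach_device stands in for the libvirt detach call, devices
        # for the VM's internal device list, save_state for saveState().
        try:
            detach_device(drive)      # may raise; state untouched on failure
        except Exception as e:
            return {'status': 'error', 'message': str(e)}
        else:
            devices.remove(drive)     # internal state now matches reality
        finally:
            save_state()              # runs on both paths, so a restarted
                                      # VDSM recovers a consistent view
        return {'status': 'done'}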
Change-Id: I5169fa16591283de33aa82c5730626bfd5d3eaf5
Signed-off-by: Xavi Francisco <xfrancis(a)redhat.com>
---
M vdsm/virt/vm.py
1 file changed, 12 insertions(+), 20 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/87/28187/1
diff --git a/vdsm/virt/vm.py b/vdsm/virt/vm.py
index bd670b9..1eb2adc 100644
--- a/vdsm/virt/vm.py
+++ b/vdsm/virt/vm.py
@@ -3449,20 +3449,6 @@
driveXml = drive.getXML().toprettyxml(encoding='utf-8')
self.log.debug("Hotunplug disk xml: %s", driveXml)
# Remove found disk from vm's drives list
- if isVdsmImage(drive):
- self.sdIds.remove(drive.domainID)
- self._devices[DISK_DEVICES].remove(drive)
- # Find and remove disk device from vm's conf
- diskDev = None
- for dev in self.conf['devices'][:]:
- if (dev['type'] == DISK_DEVICES and
- dev['path'] == drive.path):
- with self._confLock:
- self.conf['devices'].remove(dev)
- diskDev = dev
- break
-
- self.saveState()
hooks.before_disk_hotunplug(driveXml, self.conf,
params=drive.custom)
@@ -3472,19 +3458,25 @@
self.log.error("Hotunplug failed", exc_info=True)
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
return errCode['noVM']
- self._devices[DISK_DEVICES].append(drive)
- # Restore disk device in vm's conf and _devices
- if diskDev:
- with self._confLock:
- self.conf['devices'].append(diskDev)
- self.saveState()
return {
'status': {'code': errCode['hotunplugDisk']['status']['code'],
'message': e.message}}
else:
+ if isVdsmImage(drive):
+ self.sdIds.remove(drive.domainID)
+ self._devices[DISK_DEVICES].remove(drive)
+ # Find and remove disk device from vm's conf
+ for dev in self.conf['devices'][:]:
+ if (dev['type'] == DISK_DEVICES and
+ dev['path'] == drive.path):
+ with self._confLock:
+ self.conf['devices'].remove(dev)
+ break
hooks.after_disk_hotunplug(driveXml, self.conf,
params=drive.custom)
self._cleanupDrives(drive)
+ finally:
+ self.saveState()
return {'status': doneCode, 'vmList': self.status()}
--
To view, visit http://gerrit.ovirt.org/28187
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I5169fa16591283de33aa82c5730626bfd5d3eaf5
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Xavi Francisco <xfrancis(a)redhat.com>
Change in vdsm[master]: incorporate stop/start logic to functional tests
by ykleinbe@redhat.com
Yoav Kleinberger has uploaded a new change for review.
Change subject: incorporate stop/start logic to functional tests
......................................................................
incorporate stop/start logic to functional tests
Change-Id: I55b86578a681de2b09ac5ee01ca2e3b9443ed13a
Signed-off-by: Yoav Kleinberger <ykleinbe(a)redhat.com>
---
M tests/functional/new/basicLocalFSStorageDomainTest.py
A tests/functional/new/controlvdsm.py
2 files changed, 65 insertions(+), 1 deletion(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/23/29423/1
diff --git a/tests/functional/new/basicLocalFSStorageDomainTest.py b/tests/functional/new/basicLocalFSStorageDomainTest.py
index a590886..3280e16 100644
--- a/tests/functional/new/basicLocalFSStorageDomainTest.py
+++ b/tests/functional/new/basicLocalFSStorageDomainTest.py
@@ -6,10 +6,22 @@
import uuid
import storage.volume
import storage.image
+import logging
+logging.basicConfig(level=logging.DEBUG,
+                    format='%(asctime)s TEST %(levelname)s: %(message)s')
from . import testlib
+from . import controlvdsm
class TestBasicLocalFSStorageDomain:
- def notest_flow_connect_create_storage_domain_format_disconnect(self):
+ def setup(self):
+ control_vdsm = controlvdsm.ControlVDSM()
+ control_vdsm.cleanup()
+
+ @classmethod
+ def teardown_class(cls):
+ control_vdsm = controlvdsm.ControlVDSM()
+ control_vdsm.cleanup()
+
+ def test_flow_connect_create_storage_domain_format_disconnect(self):
with testlib.TemporaryDirectory() as directory:
with testlib.VDSMTestTools() as (vdsm, verify):
diff --git a/tests/functional/new/controlvdsm.py b/tests/functional/new/controlvdsm.py
new file mode 100644
index 0000000..a7e8e79
--- /dev/null
+++ b/tests/functional/new/controlvdsm.py
@@ -0,0 +1,52 @@
+import subprocess
+import logging
+import vdsm.vdscli
+import socket
+import vdsm.config
+import time
+
+class ControlVDSM(object):
+ def cleanup(self):
+ self._stop_service()
+ assert not self._service_running()
+ self._brutally_clean_files()
+# self._restart_service()
+ return self._check_connection()
+
+ def _check_connection(self):
+ useSSL = vdsm.config.config.getboolean('vars', 'ssl')
+ vdsmClient = vdsm.vdscli.connect(useSSL=useSSL)
+ RETRIES = 5
+        for _ in range(RETRIES):
+            try:
+                vdsmClient.getStorageDomainsList()
+            except socket.error as e:
+                logging.warning('could not talk to VDSM: %s' % e)
+                time.sleep(1)
+            else:
+                logging.info('VDSM ready for testing')
+                return
+
+        raise Exception('could not connect to VDSM')
+
+ def _stop_service(self):
+ self._run("sudo service vdsmd stop")
+
+ def _service_running(self):
+        return_code = subprocess.call(
+            'sudo service vdsmd status', shell=True,
+            stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
+ logging.info('vdsm running: %s' % (return_code == 0))
+ return return_code == 0
+
+ def _restart_service(self):
+ self._run("sudo vdsm-tool configure --force")
+ self._run("sudo service vdsmd start")
+
+ def _run(self, command):
+ logging.info('running: %s' % command)
+        return_code = subprocess.call(
+            command, shell=True, close_fds=True,
+            stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
+ if return_code != 0:
+ logging.warning('failure! command was: %s' % command)
+ else:
+ logging.info('finished.')
+
+ def _brutally_clean_files(self):
+ logging.warning('removing /rhev/data-center without asking too many questions')
+ self._run('sudo rm -fr /rhev/data-center/*')
--
To view, visit http://gerrit.ovirt.org/29423
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I55b86578a681de2b09ac5ee01ca2e3b9443ed13a
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Yoav Kleinberger <ykleinbe(a)redhat.com>