Hello Ayal Baron, Timothy Asir, Saggi Mizrahi, Federico Simoncelli, Dan Kenigsberg,
I'd like you to do a code review. Please visit
http://gerrit.ovirt.org/10200
to review the following change.
Change subject: gluster: add gluster task support
......................................................................
gluster: add gluster task support
gluster volume operations like rebalance, replace-brick, remove-brick
are async operations which needs to be tracked as async tasks in
oVirt. This is done by introducing below new verbs and changes in
existing rebalance, replace-brick, remove-brick verbs.
New verb:
* glusterTaskActionPerform
* glusterTasksList
- return value structure:
[{"id": TASKID,
"verb": VOLUMENAME,
"state": TaskStatus,
"code": TaskType,
"message": STRING,
"result": '',
"tag": 'gluster'}, ...]
As the below verbs are not yet consumed by engine/RHS-C, it's OK to defer
the compatibility issue for now.
glusterVolumeRebalanceStart
glusterVolumeRebalanceStatus
glusterVolumeReplaceBrickStart
glusterVolumeReplaceBrickStatus
glusterVolumeRemoveBrickStart
glusterVolumeRemoveBrickStatus
Change-Id: I154df353bc6f23001d7bf61b8f5345abd2019cb6
Signed-off-by: Bala.FA <barumuga(a)redhat.com>
---
M tests/gluster_cli_tests.py
M vdsm/gluster/api.py
M vdsm/gluster/cli.py
M vdsm/gluster/exception.py
M vdsm_cli/vdsClientGluster.py
5 files changed, 762 insertions(+), 169 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/00/10200/1
diff --git a/tests/gluster_cli_tests.py b/tests/gluster_cli_tests.py
index b5dedbb..227e6f8 100644
--- a/tests/gluster_cli_tests.py
+++ b/tests/gluster_cli_tests.py
@@ -1067,3 +1067,231 @@
def test_parseVolumeProfileInfo(self):
self._parseVolumeProfileInfo_test()
self._parseVolumeProfileInfoNfs_test()
+
+ def test_parseVolumeStatusAll(self):
+ out = """<?xml version="1.0"
encoding="UTF-8" standalone="yes"?>
+<cliOutput>
+ <opRet>0</opRet>
+ <opErrno>0</opErrno>
+ <opErrstr></opErrstr>
+ <volumes>
+ <volume>
+ <name>V1</name>
+ <id>03eace73-9197-49d0-a877-831bc6e9dac2</id>
+ <tasks>
+ <task>
+ <name>rebalance</name>
+ <id>12345473-9197-49d0-a877-831bc6e9dac2</id>
+ </task>
+ </tasks>
+ </volume>
+ <volume>
+ <name>V2</name>
+ <id>03eace73-1237-49d0-a877-831bc6e9dac2</id>
+ <tasks>
+ <task>
+ <name>replace-brick</name>
+ <id>12345473-1237-49d0-a877-831bc6e9dac2</id>
+ <sourceBrick>192.168.122.167:/tmp/V2-b1</sourceBrick>
+ <destBrick>192.168.122.168:/tmp/V2-b1</destBrick>
+ </task>
+ </tasks>
+ </volume>
+ <volume>
+ <name>V3</name>
+ <id>03eace73-1237-1230-a877-831bc6e9dac2</id>
+ <tasks>
+ <task>
+ <name>remove-brick</name>
+ <id>12345473-1237-1230-a877-831bc6e9dac2</id>
+ <BrickCount>2</BrickCount>
+ <brick>192.168.122.167:/tmp/V3-b1</brick>
+ <brick>192.168.122.168:/tmp/V3-b1</brick>
+ </task>
+ </tasks>
+ </volume>
+ </volumes>
+</cliOutput>
+"""
+ ostatus = {'12345473-1237-1230-a877-831bc6e9dac2':
+ {'bricks': ['192.168.122.167:/tmp/V3-b1',
+ '192.168.122.168:/tmp/V3-b1'],
+ 'taskType': 'remove-brick',
+ 'volumeId':
'03eace73-1237-1230-a877-831bc6e9dac2',
+ 'volumeName': 'V3'},
+ '12345473-1237-49d0-a877-831bc6e9dac2':
+ {'bricks': ['192.168.122.167:/tmp/V2-b1',
+ '192.168.122.168:/tmp/V2-b1'],
+ 'taskType': 'replace-brick',
+ 'volumeId':
'03eace73-1237-49d0-a877-831bc6e9dac2',
+ 'volumeName': 'V2'},
+ '12345473-9197-49d0-a877-831bc6e9dac2':
+ {'bricks': [],
+ 'taskType': 'rebalance',
+ 'volumeId':
'03eace73-9197-49d0-a877-831bc6e9dac2',
+ 'volumeName': 'V1'}}
+ tree = etree.fromstring(out)
+ status = gcli._parseVolumeStatusAll(tree)
+ self.assertEquals(status, ostatus)
+
+ def test_parseVolumeRebalanceStatus(self):
+ out = """<?xml version="1.0"
encoding="UTF-8" standalone="yes"?>
+<cliOutput>
+ <opRet>0</opRet>
+ <opErrno>0</opErrno>
+ <opErrstr></opErrstr>
+ <volRebalance>
+ <id>03eace73-9197-49d0-a877-831bc6e9dac2</id>
+ <node>
+ <nodeName>192.168.122.2</nodeName>
+ <lookups>7628</lookups>
+ <files>273</files>
+ <failures>0</failures>
+ <size>468918728</size>
+ <status>RUNNING</status>
+ </node>
+ <node>
+ <nodeName>FC16-1</nodeName>
+ <lookups>2734</lookups>
+ <files>765</files>
+ <failures>57</failures>
+ <size>918728</size>
+ <status>FAILED</status>
+ </node>
+ <node>
+ <nodeName>FC16-2</nodeName>
+ <lookups>456</lookups>
+ <files>62</files>
+ <failures>0</failures>
+ <size>192876</size>
+ <status>COMPLETED</status>
+ </node>
+ <aggregate>
+ <lookups>10818</lookups>
+ <files>1100</files>
+ <failures>57</failures>
+ <size>470030332</size>
+ <status>RUNNING</status>
+ </aggregate>
+ </volRebalance>
+</cliOutput>
+"""
+ ostatus = {'host': [{'name': '192.168.122.2',
+ 'filesScanned': 7628,
+ 'filesMoved': 273,
+ 'filesFailed': 0,
+ 'totalSizeMoved': 468918728,
+ 'status': gcli.TaskStatus.RUNNING},
+ {'name': 'FC16-1',
+ 'filesScanned': 2734,
+ 'filesMoved': 765,
+ 'filesFailed': 57,
+ 'totalSizeMoved': 918728,
+ 'status': gcli.TaskStatus.FAILED},
+ {'name': 'FC16-2',
+ 'filesScanned': 456,
+ 'filesMoved': 62,
+ 'filesFailed': 0,
+ 'totalSizeMoved': 192876,
+ 'status': gcli.TaskStatus.COMPLETED}],
+ 'summary': {'filesScanned': 10818,
+ 'filesMoved': 1100,
+ 'filesFailed': 57,
+ 'totalSizeMoved': 470030332,
+ 'status': gcli.TaskStatus.RUNNING},
+ 'taskId': '03eace73-9197-49d0-a877-831bc6e9dac2'}
+ tree = etree.fromstring(out)
+ status = gcli._parseVolumeRebalanceStatus(tree)
+ self.assertEquals(status, ostatus)
+
+ def test_parseVolumeRemoveBrickStatus(self):
+ out = """<?xml version="1.0"
encoding="UTF-8" standalone="yes"?>
+<cliOutput>
+ <opRet>0</opRet>
+ <opErrno>0</opErrno>
+ <opErrstr></opErrstr>
+ <volRemoveBrick>
+ <id>03eace73-9197-49d0-a877-831bc6e9dac2</id>
+ <node>
+ <nodeName>192.168.122.2</nodeName>
+ <lookups>7628</lookups>
+ <files>273</files>
+ <failures>0</failures>
+ <size>468918728</size>
+ <status>RUNNING</status>
+ </node>
+ <node>
+ <nodeName>FC16-1</nodeName>
+ <lookups>2734</lookups>
+ <files>765</files>
+ <failures>57</failures>
+ <size>918728</size>
+ <status>FAILED</status>
+ </node>
+ <node>
+ <nodeName>FC16-2</nodeName>
+ <lookups>456</lookups>
+ <files>62</files>
+ <failures>0</failures>
+ <size>192876</size>
+ <status>COMPLETED</status>
+ </node>
+ <aggregate>
+ <lookups>10818</lookups>
+ <files>1100</files>
+ <failures>57</failures>
+ <size>470030332</size>
+ <status>RUNNING</status>
+ </aggregate>
+ </volRemoveBrick>
+</cliOutput>
+"""
+ ostatus = {'host': [{'name': '192.168.122.2',
+ 'filesScanned': 7628,
+ 'filesMoved': 273,
+ 'filesFailed': 0,
+ 'totalSizeMoved': 468918728,
+ 'status': gcli.TaskStatus.RUNNING},
+ {'name': 'FC16-1',
+ 'filesScanned': 2734,
+ 'filesMoved': 765,
+ 'filesFailed': 57,
+ 'totalSizeMoved': 918728,
+ 'status': gcli.TaskStatus.FAILED},
+ {'name': 'FC16-2',
+ 'filesScanned': 456,
+ 'filesMoved': 62,
+ 'filesFailed': 0,
+ 'totalSizeMoved': 192876,
+ 'status': gcli.TaskStatus.COMPLETED}],
+ 'summary': {'filesScanned': 10818,
+ 'filesMoved': 1100,
+ 'filesFailed': 57,
+ 'totalSizeMoved': 470030332,
+ 'status': gcli.TaskStatus.RUNNING},
+ 'taskId': '03eace73-9197-49d0-a877-831bc6e9dac2'}
+ tree = etree.fromstring(out)
+ status = gcli._parseVolumeRemoveBrickStatus(tree)
+ self.assertEquals(status, ostatus)
+
+ def test_parseVolumeReplaceBrickStatus(self):
+ out = """<?xml version="1.0"
encoding="UTF-8" standalone="yes"?>
+<cliOutput>
+ <opRet>0</opRet>
+ <opErrno>0</opErrno>
+ <opErrstr></opErrstr>
+ <volReplaceBrick>
+ <id>03eace73-9197-49d0-a877-831bc6e9dac2</id>
+ <filesMoved>273</filesMoved>
+ <movingFile>pixmaps/logfactor5.png</movingFile>
+ <status>RUNNING</status>
+ </volReplaceBrick>
+</cliOutput>
+"""
+ ostatus = {'filesMoved': 273,
+ 'movingFile': 'pixmaps/logfactor5.png',
+ 'status': gcli.TaskStatus.RUNNING,
+ 'taskId': '03eace73-9197-49d0-a877-831bc6e9dac2'}
+ tree = etree.fromstring(out)
+ status = gcli._parseVolumeReplaceBrickStatus(tree)
+ self.assertEquals(status, ostatus)
diff --git a/vdsm/gluster/api.py b/vdsm/gluster/api.py
index 5f0b0ed..2121ffd 100644
--- a/vdsm/gluster/api.py
+++ b/vdsm/gluster/api.py
@@ -19,9 +19,12 @@
#
from functools import wraps
+import logging
from vdsm.define import doneCode
import supervdsm as svdsm
+from cli import TaskType, TaskAction
+import exception as ge
_SUCCESS = {'status': doneCode}
@@ -45,11 +48,22 @@
The gluster interface of vdsm.
"""
+ svdsmProxy = svdsm.getProxy()
+ _taskActionMap = \
+ {TaskType.REBALANCE:
+ {TaskAction.STOP: svdsmProxy.glusterVolumeRebalanceStop},
+ TaskType.REPLACE_BRICK:
+ {TaskAction.STOP: svdsmProxy.glusterVolumeReplaceBrickStop,
+ TaskAction.ABORT: svdsmProxy.glusterVolumeReplaceBrickAbort,
+ TaskAction.PAUSE: svdsmProxy.glusterVolumeReplaceBrickPause,
+ TaskAction.COMMIT: svdsmProxy.glusterVolumeReplaceBrickCommit},
+ TaskType.REMOVE_BRICK:
+ {TaskAction.STOP: svdsmProxy.glusterVolumeRemoveBrickStop,
+ TaskAction.COMMIT: svdsmProxy.glusterVolumeRemoveBrickCommit}}
def __init__(self, cif, log):
self.cif = cif
self.log = log
- self.svdsmProxy = svdsm.getProxy()
@exportAsVerb
def volumesList(self, volumeName=None, options=None):
@@ -95,9 +109,9 @@
@exportAsVerb
def volumeRebalanceStart(self, volumeName, rebalanceType="",
force=False, options=None):
- self.svdsmProxy.glusterVolumeRebalanceStart(volumeName,
- rebalanceType,
- force)
+ return self.svdsmProxy.glusterVolumeRebalanceStart(volumeName,
+ rebalanceType,
+ force)
@exportAsVerb
def volumeRebalanceStop(self, volumeName, force=False, options=None):
@@ -105,15 +119,15 @@
@exportAsVerb
def volumeRebalanceStatus(self, volumeName, options=None):
- st, msg = self.svdsmProxy.glusterVolumeRebalanceStatus(volumeName)
- return {'rebalance': st, 'message': msg}
+ return {'volumeStatus':
+ self.svdsmProxy.glusterVolumeRebalanceStatus(volumeName)}
@exportAsVerb
def volumeReplaceBrickStart(self, volumeName, existingBrick, newBrick,
options=None):
- self.svdsmProxy.glusterVolumeReplaceBrickStart(volumeName,
- existingBrick,
- newBrick)
+ return self.svdsmProxy.glusterVolumeReplaceBrickStart(volumeName,
+ existingBrick,
+ newBrick)
@exportAsVerb
def volumeReplaceBrickAbort(self, volumeName, existingBrick, newBrick,
@@ -132,10 +146,10 @@
@exportAsVerb
def volumeReplaceBrickStatus(self, volumeName, oldBrick, newBrick,
options=None):
- st, msg = self.svdsmProxy.glusterVolumeReplaceBrickStatus(volumeName,
- oldBrick,
- newBrick)
- return {'replaceBrick': st, 'message': msg}
+ return {'volumeStatus':
+ self.svdsmProxy.glusterVolumeReplaceBrickStatus(volumeName,
+ oldBrick,
+ newBrick)}
@exportAsVerb
def volumeReplaceBrickCommit(self, volumeName, existingBrick, newBrick,
@@ -148,8 +162,9 @@
@exportAsVerb
def volumeRemoveBrickStart(self, volumeName, brickList,
replicaCount=0, options=None):
- self.svdsmProxy.glusterVolumeRemoveBrickStart(volumeName, brickList,
- replicaCount)
+ return self.svdsmProxy.glusterVolumeRemoveBrickStart(volumeName,
+ brickList,
+ replicaCount)
@exportAsVerb
def volumeRemoveBrickStop(self, volumeName, brickList,
@@ -160,10 +175,10 @@
@exportAsVerb
def volumeRemoveBrickStatus(self, volumeName, brickList,
replicaCount=0, options=None):
- message = self.svdsmProxy.glusterVolumeRemoveBrickStatus(volumeName,
- brickList,
- replicaCount)
- return {'message': message}
+ status = self.svdsmProxy.glusterVolumeRemoveBrickStatus(volumeName,
+ brickList,
+ replicaCount)
+ return {'volumeStatus': status}
@exportAsVerb
def volumeRemoveBrickCommit(self, volumeName, brickList,
@@ -186,6 +201,91 @@
return {'volumeStatus': status}
@exportAsVerb
+ def taskActionPerform(self, taskId, action, options=None):
+ tasks = self.svdsmProxy.glusterVolumeStatusAll()
+ if taskId not in tasks:
+ raise ge.GlusterTaskNotFoundException(taskId)
+
+ act = getattr(TaskAction, action, None)
+ if not act:
+ raise ge.GlusterTaskActionNotFoundException(taskId, action)
+
+ value = tasks[taskId]
+ taskType = value['taskType']
+        if act not in self._taskActionMap[taskType]:
+ raise ge.GlusterTaskActionUnsupportedException(taskId,
+ taskType,
+ action)
+
+ func = self._taskActionMap[taskType][act]
+ if taskType == TaskType.REBALANCE:
+ func(value['volumeName'])
+ elif taskType == TaskType.REMOVE_BRICK:
+ func(value['volumeName'], value['bricks'])
+ elif taskType == TaskType.REPLACE_BRICK:
+ func(value['volumeName'], value['bricks'][0],
value['bricks'][1])
+ else:
+ raise ge.GlusterTaskTypeUnknownException(taskId, taskType)
+
+ @exportAsVerb
+ def tasksList(self, options=None):
+ """
+ Return all gluster tasks as
+ [{"id": TASKID,
+ "verb": VOLUMENAME,
+ "state": TaskStatus,
+ "code": TaskType,
+ "message": STRING,
+ "result": '',
+ "tag": 'gluster'}, ...]
+ """
+ subRes = {}
+        for taskId, value in \
+                self.svdsmProxy.glusterVolumeStatusAll().iteritems():
+ try:
+ msg = ''
+ state = ''
+ if value['taskType'] == TaskType.REBALANCE:
+ status = self.svdsmProxy.\
+ glusterVolumeRebalanceStatus(value['volumeName'])
+ msg = ('Files [scanned: %d, moved: %d, failed: %d], '
+ 'Total size moved: %d') % \
+ (status['summary']['filesScanned'],
+ status['summary']['filesMoved'],
+ status['summary']['filesFailed'],
+ status['summary']['totalSizeMoved'])
+ state = status['summary']['status']
+ elif value['taskType'] == TaskType.REMOVE_BRICK:
+ status = self.svdsmProxy.\
+ glusterVolumeRemoveBrickStatus(value['volumeName'],
+ value['bricks'])
+ msg = ('Files [scanned: %d, moved: %d, failed: %d], '
+ 'Total size moved: %d') % \
+ (status['summary']['filesScanned'],
+ status['summary']['filesMoved'],
+ status['summary']['filesFailed'],
+ status['summary']['totalSizeMoved'])
+ state = status['summary']['status']
+ elif value['taskType'] == TaskType.REPLACE_BRICK:
+ status = self.svdsmProxy.\
+ glusterVolumeReplaceBrickStatus(value['volumeName'],
+ value['bricks'][0],
+ value['bricks'][1])
+ msg = 'Files moved: %d, Moving file: %s' % \
+ (status['filesMoved'], status['movingFile'])
+ state = status['status']
+
+ subRes[taskId] = {"id": taskId,
+ "verb": value['volumeName'],
+ "state": state,
+ "code": value['taskType'],
+ "message": msg,
+ "result": '',
+ "tag": 'gluster'}
+ except ge.GlusterException:
+                logging.error("gluster exception occurred", exc_info=True)
+ return subRes
+
+ @exportAsVerb
def hostAdd(self, hostName, options=None):
self.svdsmProxy.glusterPeerProbe(hostName)
diff --git a/vdsm/gluster/cli.py b/vdsm/gluster/cli.py
index 7136281..c3f2ed8 100644
--- a/vdsm/gluster/cli.py
+++ b/vdsm/gluster/cli.py
@@ -72,6 +72,25 @@
RDMA = 'RDMA'
+class TaskType:
+ REBALANCE = 'REBALANCE'
+ REPLACE_BRICK = 'REPLACE_BRICK'
+ REMOVE_BRICK = 'REMOVE_BRICK'
+
+
+class TaskStatus:
+ RUNNING = 'RUNNING'
+ FAILED = 'FAILED'
+ COMPLETED = 'COMPLETED'
+
+
+class TaskAction:
+    STOP = 'STOP'
+    ABORT = 'ABORT'
+    PAUSE = 'PAUSE'
+    COMMIT = 'COMMIT'
+
+
def _execGluster(cmd):
return utils.execCmd(cmd)
@@ -303,6 +322,50 @@
return _parseVolumeStatusMem(xmltree)
else:
return _parseVolumeStatus(xmltree)
+ except (etree.ParseError, AttributeError, ValueError):
+ raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
+
+
+def _parseVolumeStatusAll(tree):
+ """
+ returns {TaskId: {'volumeName': VolumeName,
+ 'volumeId': VolumeId,
+ 'taskType': TaskType,
+ 'bricks': BrickList}, ...}
+ """
+ tasks = {}
+ for el in tree.findall('volumes/volume'):
+ volumeName = el.find('name').text
+ volumeId = el.find('id').text
+ for c in el.findall('tasks/task'):
+ taskType = c.find('name').text
+ taskType = taskType.upper().replace('-', '_')
+ taskId = c.find('id').text
+ bricks = []
+ if taskType == TaskType.REPLACE_BRICK:
+ bricks.append(c.find('sourceBrick').text)
+ bricks.append(c.find('destBrick').text)
+ elif taskType == TaskType.REMOVE_BRICK:
+ for b in c.findall('brick'):
+ bricks.append(b.text)
+ elif taskType == TaskType.REBALANCE:
+ pass
+ tasks[taskId] = {'volumeName': volumeName,
+ 'volumeId': volumeId,
+ 'taskType': taskType,
+ 'bricks': bricks}
+ return tasks
+
+
+@exportToSuperVdsm
+def volumeStatusAll():
+ command = _getGlusterVolCmd() + ["status", "all"]
+ try:
+ xmltree = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeStatusAllFailedException(rc=e.rc, err=e.err)
+ try:
+ return _parseVolumeStatusAll(xmltree)
except (etree.ParseError, AttributeError, ValueError):
raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@@ -554,11 +617,15 @@
command.append("start")
if force:
command.append("force")
- rc, out, err = _execGluster(command)
- if rc:
- raise ge.GlusterVolumeRebalanceStartFailedException(rc, out, err)
- else:
- return True
+ try:
+ xmltree = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeRebalanceStartFailedException(rc=e.rc,
+ err=e.err)
+ try:
+ return {'taskId': xmltree.find('id').text}
+ except (etree.ParseError, AttributeError, ValueError):
+ raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@exportToSuperVdsm
@@ -566,84 +633,141 @@
command = _getGlusterVolCmd() + ["rebalance", volumeName,
"stop"]
if force:
command.append('force')
- rc, out, err = _execGluster(command)
- if rc:
- raise ge.GlusterVolumeRebalanceStopFailedException(rc, out, err)
- else:
+ try:
+ _execGlusterXml(command)
return True
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeRebalanceStopFailedException(rc=e.rc,
+ err=e.err)
+
+
+def _parseVolumeRebalanceRemoveBrickStatus(xmltree, mode):
+ """
+ returns {'taskId': UUID,
+ 'host': [{'name': NAME,
+ 'id': HOSTID,
+ 'filesScanned': INT,
+ 'filesMoved': INT,
+ 'filesFailed': INT,
+ 'totalSizeMoved': INT,
+ 'status': TaskStatus},...]
+ 'summary': {'filesScanned': INT,
+ 'filesMoved': INT,
+ 'filesFailed': INT,
+ 'totalSizeMoved': INT,
+ 'status': TaskStatus}}
+ """
+ if mode == 'rebalance':
+ tree = xmltree.find('volRebalance')
+ elif mode == 'remove-brick':
+ tree = xmltree.find('volRemoveBrick')
+ else:
+ return
+ status = \
+ {'taskId': tree.find('id').text,
+ 'summary': \
+ {'filesScanned': int(tree.find('aggregate/lookups').text),
+ 'filesMoved': int(tree.find('aggregate/files').text),
+ 'filesFailed': int(tree.find('aggregate/failures').text),
+ 'totalSizeMoved': int(tree.find('aggregate/size').text),
+              'status': tree.find('aggregate/status').text},
+ 'host': []}
+ for el in tree.findall('node'):
+ status['host'].append({'name': el.find('nodeName').text,
+ #'id': el.find('id').text,
+ 'filesScanned':
+ int(el.find('lookups').text),
+ 'filesMoved': int(el.find('files').text),
+ 'filesFailed':
int(el.find('failures').text),
+ 'totalSizeMoved':
+ int(el.find('size').text),
+ 'status': el.find('status').text})
+ return status
+
+
+def _parseVolumeRebalanceStatus(tree):
+ return _parseVolumeRebalanceRemoveBrickStatus(tree, 'rebalance')
@exportToSuperVdsm
def volumeRebalanceStatus(volumeName):
- rc, out, err = _execGluster(_getGlusterVolCmd() + ["rebalance",
volumeName,
- "status"])
- if rc:
- raise ge.GlusterVolumeRebalanceStatusFailedException(rc, out, err)
- if 'in progress' in out[0]:
- return BrickStatus.RUNNING, "\n".join(out)
- elif 'complete' in out[0]:
- return BrickStatus.COMPLETED, "\n".join(out)
- else:
- return BrickStatus.UNKNOWN, "\n".join(out)
+ command = _getGlusterVolCmd() + ["rebalance", volumeName,
"status"]
+ try:
+ xmltree = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeRebalanceStatusFailedException(rc=e.rc,
+ err=e.err)
+ try:
+ return _parseVolumeRebalanceStatus(xmltree)
+ except (etree.ParseError, AttributeError, ValueError):
+ raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@exportToSuperVdsm
def volumeReplaceBrickStart(volumeName, existingBrick, newBrick):
- rc, out, err = _execGluster(_getGlusterVolCmd() + ["replace-brick",
- volumeName,
- existingBrick, newBrick,
- "start"])
- if rc:
- raise ge.GlusterVolumeReplaceBrickStartFailedException(rc, out, err)
- else:
- return True
+ command = _getGlusterVolCmd() + ["replace-brick", volumeName,
+ existingBrick, newBrick, "start"]
+ try:
+ xmltree = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeReplaceBrickStartFailedException(rc=e.rc,
+ err=e.err)
+ try:
+ return {'taskId': xmltree.find('id').text}
+ except (etree.ParseError, AttributeError, ValueError):
+ raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@exportToSuperVdsm
def volumeReplaceBrickAbort(volumeName, existingBrick, newBrick):
- rc, out, err = _execGluster(_getGlusterVolCmd() + ["replace-brick",
- volumeName,
- existingBrick, newBrick,
- "abort"])
- if rc:
- raise ge.GlusterVolumeReplaceBrickAbortFailedException(rc, out, err)
- else:
+ command = _getGlusterVolCmd() + ["replace-brick", volumeName,
+ existingBrick, newBrick, "abort"]
+ try:
+ _execGlusterXml(command)
return True
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeReplaceBrickAbortFailedException(rc=e.rc,
+ err=e.err)
@exportToSuperVdsm
def volumeReplaceBrickPause(volumeName, existingBrick, newBrick):
- rc, out, err = _execGluster(_getGlusterVolCmd() + ["replace-brick",
- volumeName,
- existingBrick, newBrick,
- "pause"])
- if rc:
- raise ge.GlusterVolumeReplaceBrickPauseFailedException(rc, out, err)
- else:
+ command = _getGlusterVolCmd() + ["replace-brick", volumeName,
+ existingBrick, newBrick, "pause"]
+ try:
+ _execGlusterXml(command)
return True
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeReplaceBrickPauseFailedException(rc=e.rc,
+ err=e.err)
+
+
+def _parseVolumeReplaceBrickStatus(tree):
+ """
+ returns {'taskId': UUID,
+ 'filesMoved': INT,
+ 'movingFile': STRING,
+ 'status': TaskStatus}
+ """
+ return {'taskId': tree.find('volReplaceBrick/id').text,
+ 'filesMoved':
int(tree.find('volReplaceBrick/filesMoved').text),
+ 'movingFile': tree.find('volReplaceBrick/movingFile').text,
+ 'status': tree.find('volReplaceBrick/status').text}
@exportToSuperVdsm
def volumeReplaceBrickStatus(volumeName, existingBrick, newBrick):
- rc, out, err = _execGluster(_getGlusterVolCmd() + ["replace-brick",
- volumeName,
- existingBrick, newBrick,
- "status"])
- if rc:
- raise ge.GlusterVolumeReplaceBrickStatusFailedException(rc, out,
- err)
- message = "\n".join(out)
- statLine = out[0].strip().upper()
- if BrickStatus.PAUSED in statLine:
- return BrickStatus.PAUSED, message
- elif statLine.endswith('MIGRATION COMPLETE'):
- return BrickStatus.COMPLETED, message
- elif statLine.startswith('NUMBER OF FILES MIGRATED'):
- return BrickStatus.RUNNING, message
- elif statLine.endswith("UNKNOWN"):
- return BrickStatus.UNKNOWN, message
- else:
- return BrickStatus.NA, message
+ command = _getGlusterVolCmd() + ["replace-brick", volumeName,
+ existingBrick, newBrick, "status"]
+ try:
+ xmltree = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeReplaceBrickStatusFailedException(rc=e.rc,
+ err=e.err)
+ try:
+ return _parseVolumeReplaceBrickStatus(xmltree)
+ except (etree.ParseError, AttributeError, ValueError):
+ raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@exportToSuperVdsm
@@ -653,12 +777,12 @@
existingBrick, newBrick, "commit"]
if force:
command.append('force')
- rc, out, err = _execGluster(command)
- if rc:
- raise ge.GlusterVolumeReplaceBrickCommitFailedException(rc, out,
- err)
- else:
+ try:
+ _execGlusterXml(command)
return True
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeReplaceBrickCommitFailedException(rc=e.rc,
+ err=e.err)
@exportToSuperVdsm
@@ -667,12 +791,15 @@
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["start"]
-
- rc, out, err = _execGluster(command)
- if rc:
- raise ge.GlusterVolumeRemoveBrickStartFailedException(rc, out, err)
- else:
- return True
+ try:
+ xmltree = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeRemoveBrickStartFailedException(rc=e.rc,
+ err=e.err)
+ try:
+ return {'taskId': xmltree.find('id').text}
+ except (etree.ParseError, AttributeError, ValueError):
+ raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@exportToSuperVdsm
@@ -681,12 +808,16 @@
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["stop"]
- rc, out, err = _execGluster(command)
-
- if rc:
- raise ge.GlusterVolumeRemoveBrickStopFailedException(rc, out, err)
- else:
+ try:
+ _execGlusterXml(command)
return True
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeRemoveBrickStopFailedException(rc=e.rc,
+ err=e.err)
+
+
+def _parseVolumeRemoveBrickStatus(tree):
+ return _parseVolumeRebalanceRemoveBrickStatus(tree, 'remove-brick')
@exportToSuperVdsm
@@ -695,12 +826,15 @@
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["status"]
- rc, out, err = _execGluster(command)
-
- if rc:
- raise ge.GlusterVolumeRemoveBrickStatusFailedException(rc, out, err)
- else:
- return "\n".join(out)
+ try:
+ xmltree = _execGlusterXml(command)
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeRemoveBrickStatusFailedException(rc=e.rc,
+ err=e.err)
+ try:
+ return _parseVolumeRemoveBrickStatus(xmltree)
+ except (etree.ParseError, AttributeError, ValueError):
+ raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@exportToSuperVdsm
@@ -709,12 +843,12 @@
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["commit"]
- rc, out, err = _execGluster(command)
-
- if rc:
- raise ge.GlusterVolumeRemoveBrickCommitFailedException(rc, out, err)
- else:
+ try:
+ _execGlusterXml(command)
return True
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeRemoveBrickCommitFailedException(rc=e.rc,
+ err=e.err)
@exportToSuperVdsm
@@ -723,12 +857,12 @@
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["force"]
- rc, out, err = _execGluster(command)
-
- if rc:
- raise ge.GlusterVolumeRemoveBrickForceFailedException(rc, out, err)
- else:
+ try:
+ _execGlusterXml(command)
return True
+ except ge.GlusterCmdFailedException, e:
+ raise ge.GlusterVolumeRemoveBrickForceFailedException(rc=e.rc,
+ err=e.err)
@exportToSuperVdsm
diff --git a/vdsm/gluster/exception.py b/vdsm/gluster/exception.py
index e921d7d..bcc7835 100644
--- a/vdsm/gluster/exception.py
+++ b/vdsm/gluster/exception.py
@@ -351,6 +351,56 @@
message = "Volume profile info failed"
+class GlusterVolumeStatusAllFailedException(GlusterVolumeException):
+ code = 4161
+ message = "Volume status all failed"
+
+
+class GlusterTaskNotFoundException(GlusterVolumeException):
+ code = 4162
+ message = "Task not found"
+
+ def __init__(self, taskId):
+ self.taskId = taskId
+ s = 'task id: %s' % taskId
+ self.err = [s]
+
+
+class GlusterTaskActionNotFoundException(GlusterVolumeException):
+ code = 4163
+ message = "Task action not found"
+
+ def __init__(self, taskId, action):
+ self.taskId = taskId
+ self.action = action
+ s = 'Action %s not found for task %s' % (action, taskId)
+ self.err = [s]
+
+
+class GlusterTaskActionUnsupportedException(GlusterVolumeException):
+ code = 4164
+ message = "Task action unsupported"
+
+ def __init__(self, taskId, taskType, action):
+ self.taskId = taskId
+ self.taskType = taskType
+ self.action = action
+ s = 'Unsupported action %s for task %s and type %s' % \
+ (action, taskId, taskType)
+ self.err = [s]
+
+
+class GlusterTaskTypeUnknownException(GlusterVolumeException):
+ code = 4165
+ message = "Task type unknown"
+
+ def __init__(self, taskId, taskType):
+ self.taskId = taskId
+ self.taskType = taskType
+        s = 'Unknown task type %s for task %s' % (taskType, taskId)
+ self.err = [s]
+
+
# Host
class GlusterHostException(GlusterException):
code = 4400
diff --git a/vdsm_cli/vdsClientGluster.py b/vdsm_cli/vdsClientGluster.py
index be47696..39c4782 100644
--- a/vdsm_cli/vdsClientGluster.py
+++ b/vdsm_cli/vdsClientGluster.py
@@ -112,23 +112,29 @@
return status['status']['code'],
status['status']['message']
def do_glusterVolumeRebalanceStart(self, args):
- params = self._eqSplit(args[1:])
- rebalanceType = params.get('type', 'fix-layout')
- force = params.get('force', False)
- status = self.s.glusterVolumeRebalanceStart(args[0],
- rebalanceType, force)
- pp.pprint(status)
+ params = self._eqSplit(args)
+ volumeName = params.get('volumeName', '')
+ rebalanceType = params.get('rebalanceType', '')
+ force = (params.get('force', 'no').upper() == 'YES')
+
+ status = self.s.glusterVolumeRebalanceStart(volumeName,
+ rebalanceType,
+ force)
return status['status']['code'],
status['status']['message']
def do_glusterVolumeRebalanceStop(self, args):
- params = self._eqSplit(args[1:])
- force = params.get('force', False)
- status = self.s.glusterVolumeRebalanceStop(args[0], force)
- pp.pprint(status)
+ params = self._eqSplit(args)
+ volumeName = params.get('volumeName', '')
+ force = (params.get('force', 'no').upper() == 'YES')
+
+ status = self.s.glusterVolumeRebalanceStop(volumeName, force)
return status['status']['code'],
status['status']['message']
def do_glusterVolumeRebalanceStatus(self, args):
- status = self.s.glusterVolumeRebalanceStatus(args[0])
+ params = self._eqSplit(args)
+ volumeName = params.get('volumeName', '')
+
+ status = self.s.glusterVolumeRebalanceStatus(volumeName)
pp.pprint(status)
return status['status']['code'],
status['status']['message']
@@ -148,76 +154,118 @@
return status['status']['code'],
status['status']['message']
def do_glusterVolumeReplaceBrickStart(self, args):
- status = self.s.glusterVolumeReplaceBrickStart(args[0], args[1],
- args[2])
+ params = self._eqSplit(args)
+ volumeName = params.get('volumeName', '')
+ existingBrick = params.get('existingBrick', '')
+ newBrick = params.get('newBrick', '')
+
+ status = self.s.glusterVolumeReplaceBrickStart(volumeName,
+ existingBrick,
+ newBrick)
return status['status']['code'],
status['status']['message']
def do_glusterVolumeReplaceBrickAbort(self, args):
- status = self.s.glusterVolumeReplaceBrickAbort(args[0], args[1],
- args[2])
+ params = self._eqSplit(args)
+ volumeName = params.get('volumeName', '')
+ existingBrick = params.get('existingBrick', '')
+ newBrick = params.get('newBrick', '')
+
+ status = self.s.glusterVolumeReplaceBrickAbort(volumeName,
+ existingBrick,
+ newBrick)
return status['status']['code'],
status['status']['message']
def do_glusterVolumeReplaceBrickPause(self, args):
- status = self.s.glusterVolumeReplaceBrickPause(args[0], args[1],
- args[2])
+ params = self._eqSplit(args)
+ volumeName = params.get('volumeName', '')
+ existingBrick = params.get('existingBrick', '')
+ newBrick = params.get('newBrick', '')
+
+ status = self.s.glusterVolumeReplaceBrickPause(volumeName,
+ existingBrick,
+ newBrick)
return status['status']['code'],
status['status']['message']
def do_glusterVolumeReplaceBrickStatus(self, args):
- status = self.s.glusterVolumeReplaceBrickStatus(args[0], args[1],
- args[2])
+ params = self._eqSplit(args)
+ volumeName = params.get('volumeName', '')
+ existingBrick = params.get('existingBrick', '')
+ newBrick = params.get('newBrick', '')
+
+ status = self.s.glusterVolumeReplaceBrickStatus(volumeName,
+ existingBrick,
+ newBrick)
+ pp.pprint(status)
return status['status']['code'],
status['status']['message']
def do_glusterVolumeReplaceBrickCommit(self, args):
- status = self.s.glusterVolumeReplaceBrickCommit(args[0], args[1],
- args[2])
+ params = self._eqSplit(args)
+ volumeName = params.get('volumeName', '')
+ existingBrick = params.get('existingBrick', '')
+ newBrick = params.get('newBrick', '')
+ force = (params.get('force', 'no').upper() == 'YES')
+
+ status = self.s.glusterVolumeReplaceBrickCommit(volumeName,
+ existingBrick,
+ newBrick,
+ force)
return status['status']['code'],
status['status']['message']
def do_glusterVolumeRemoveBrickStart(self, args):
- params = self._eqSplit(args[1:])
+ params = self._eqSplit(args)
+ volumeName = params.get('volumeName', '')
try:
brickList = params['bricks'].split(',')
except:
raise ValueError
replicaCount = params.get('replica', '')
- status = self.s.glusterVolumeRemoveBrickStart(args[0], brickList,
+
+ status = self.s.glusterVolumeRemoveBrickStart(volumeName,
+ brickList,
replicaCount)
- pp.pprint(status)
return status['status']['code'],
status['status']['message']
def do_glusterVolumeRemoveBrickStop(self, args):
- params = self._eqSplit(args[1:])
+ params = self._eqSplit(args)
+ volumeName = params.get('volumeName', '')
try:
brickList = params['bricks'].split(',')
except:
raise ValueError
replicaCount = params.get('replica', '')
- status = self.s.glusterVolumeRemoveBrickStop(args[0], brickList,
+
+ status = self.s.glusterVolumeRemoveBrickStop(volumeName,
+ brickList,
replicaCount)
- pp.pprint(status)
return status['status']['code'],
status['status']['message']
def do_glusterVolumeRemoveBrickStatus(self, args):
- params = self._eqSplit(args[1:])
+ params = self._eqSplit(args)
+ volumeName = params.get('volumeName', '')
try:
brickList = params['bricks'].split(',')
except:
raise ValueError
replicaCount = params.get('replica', '')
- status = self.s.glusterVolumeRemoveBrickStatus(args[0], brickList,
+
+ status = self.s.glusterVolumeRemoveBrickStatus(volumeName,
+ brickList,
replicaCount)
pp.pprint(status)
return status['status']['code'],
status['status']['message']
def do_glusterVolumeRemoveBrickCommit(self, args):
- params = self._eqSplit(args[1:])
+ params = self._eqSplit(args)
+ volumeName = params.get('volumeName', '')
try:
brickList = params['bricks'].split(',')
except:
raise ValueError
replicaCount = params.get('replica', '')
- status = self.s.glusterVolumeRemoveBrickCommit(args[0], brickList,
+
+ status = self.s.glusterVolumeRemoveBrickCommit(volumeName,
+ brickList,
replicaCount)
- pp.pprint(status)
return status['status']['code'],
status['status']['message']
def do_glusterVolumeRemoveBrickForce(self, args):
@@ -270,6 +318,14 @@
status = self.s.glusterVolumeProfileInfo(volumeName, nfs)
pp.pprint(status)
+ return status['status']['code'],
status['status']['message']
+
+ def do_glusterTaskActionPerform(self, args):
+ params = self._eqSplit(args)
+ taskId = params.get('taskId', '')
+ action = params.get('action', '')
+
+ status = self.s.glusterTaskActionPerform(taskId, action)
return status['status']['code'],
status['status']['message']
@@ -338,18 +394,22 @@
)),
'glusterVolumeRebalanceStart': (
serv.do_glusterVolumeRebalanceStart,
- ('<volume_name>\n\t<volume_name> is existing volume
name',
+ ('volumeName=<volume_name> [rebalanceType=fix-layout] '
+ '[force={yes|no}]\n\t'
+ '<volume_name> is existing volume name',
'start volume rebalance'
)),
'glusterVolumeRebalanceStop': (
serv.do_glusterVolumeRebalanceStop,
- ('<volume_name>\n\t<volume_name> is existing volume
name',
+ ('volumeName=<volume_name> [force={yes|no}]\n\t'
+ '<volume_name> is existing volume name',
'stop volume rebalance'
)),
'glusterVolumeRebalanceStatus': (
serv.do_glusterVolumeRebalanceStatus,
- ('<volume_name>\n\t<volume_name> is existing volume
name',
- 'get volume rebalance status'
+ ('volumeName=<volume_name>\n\t'
+ '<volume_name> is existing volume name',
+ 'get volume rebalance status'
)),
'glusterVolumeDelete': (
serv.do_glusterVolumeDelete,
@@ -366,65 +426,79 @@
)),
'glusterVolumeReplaceBrickStart': (
serv.do_glusterVolumeReplaceBrickStart,
- ('<volume_name> <existing_brick> <new_brick>
\n\t<volume_name> '
- 'is existing volume name\n\t<brick> is existing brick\n\t'
+ ('volumeName=<volume_name> existingBrick=<existing_brick>
'
+ 'newBrick=<new_brick>\n\t'
+ '<volume_name> is existing volume name\n\t'
+ '<existing_brick> is existing brick\n\t'
'<new_brick> is new brick',
'start volume replace brick'
)),
'glusterVolumeReplaceBrickAbort': (
serv.do_glusterVolumeReplaceBrickAbort,
- ('<volume_name> <existing_brick> <new_brick>
\n\t<volume_name> '
- 'is existing volume name\n\t<brick> is existing brick\n\t'
+ ('volumeName=<volume_name> existingBrick=<existing_brick>
'
+ 'newBrick=<new_brick>\n\t'
+ '<volume_name> is existing volume name\n\t'
+ '<existing_brick> is existing brick\n\t'
'<new_brick> is new brick',
'abort volume replace brick'
)),
'glusterVolumeReplaceBrickPause': (
serv.do_glusterVolumeReplaceBrickPause,
- ('<volume_name> <existing_brick> <new_brick>
\n\t<volume_name> '
- 'is existing volume name\n\t<brick> is existing brick\n\t'
+ ('volumeName=<volume_name> existingBrick=<existing_brick>
'
+ 'newBrick=<new_brick>\n\t'
+ '<volume_name> is existing volume name\n\t'
+ '<existing_brick> is existing brick\n\t'
'<new_brick> is new brick',
'pause volume replace brick'
)),
'glusterVolumeReplaceBrickStatus': (
serv.do_glusterVolumeReplaceBrickStatus,
- ('<volume_name> <existing_brick> <new_brick>
\n\t<volume_name> '
- 'is existing volume name\n\t<brick> is existing brick\n\t'
+ ('volumeName=<volume_name> existingBrick=<existing_brick>
'
+ 'newBrick=<new_brick>\n\t'
+ '<volume_name> is existing volume name\n\t'
+ '<existing_brick> is existing brick\n\t'
'<new_brick> is new brick',
'get volume replace brick status'
)),
'glusterVolumeReplaceBrickCommit': (
serv.do_glusterVolumeReplaceBrickCommit,
- ('<volume_name> <existing_brick> <new_brick>
\n\t<volume_name> '
- 'is existing volume name\n\t<brick> is existing brick\n\t'
+ ('volumeName=<volume_name> existingBrick=<existing_brick>
'
+ 'newBrick=<new_brick> [force={yes|no}]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ '<existing_brick> is existing brick\n\t'
'<new_brick> is new brick',
'commit volume replace brick'
)),
'glusterVolumeRemoveBrickStart': (
serv.do_glusterVolumeRemoveBrickStart,
- ('<volume_name> [replica=<count>] bricks=brick[,brick] ...
\n\t'
- '<volume_name> is existing volume name\n\t<brick> is '
- 'existing brick',
+ ('volumeName=<volume_name> bricks=<brick[,brick, ...]>
'
+ '[replica=<count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ '<brick[,brick, ...]> is existing brick(s)',
'start volume remove bricks'
)),
'glusterVolumeRemoveBrickStop': (
serv.do_glusterVolumeRemoveBrickStop,
- ('<volume_name> [replica=<count>] bricks=brick[,brick] ...
\n\t'
- '<volume_name> is existing volume name\n\t<brick> is '
- 'existing brick',
+ ('volumeName=<volume_name> bricks=<brick[,brick, ...]>
'
+ '[replica=<count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ '<brick[,brick, ...]> is existing brick(s)',
'stop volume remove bricks'
)),
'glusterVolumeRemoveBrickStatus': (
serv.do_glusterVolumeRemoveBrickStatus,
- ('<volume_name> [replica=<count>] bricks=brick[,brick] ...
\n\t'
- '<volume_name> is existing volume name\n\t<brick> is '
- 'existing brick',
+ ('volumeName=<volume_name> bricks=<brick[,brick, ...]>
'
+ '[replica=<count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ '<brick[,brick, ...]> is existing brick(s)',
'get volume remove bricks status'
)),
'glusterVolumeRemoveBrickCommit': (
serv.do_glusterVolumeRemoveBrickCommit,
- ('<volume_name> [replica=<count>] bricks=brick[,brick] ...
\n\t'
- '<volume_name> is existing volume name\n\t<brick> is '
- 'existing brick',
+ ('volumeName=<volume_name> bricks=<brick[,brick, ...]>
'
+ '[replica=<count>]\n\t'
+ '<volume_name> is existing volume name\n\t'
+ '<brick[,brick, ...]> is existing brick(s)',
'commit volume remove bricks'
)),
'glusterVolumeRemoveBrickForce': (
@@ -468,4 +542,11 @@
('volumeName=<volume_name> [nfs={yes|no}]\n\t'
'<volume_name> is existing volume name',
'get gluster volume profile info'
+ )),
+ 'glusterTaskActionPerform': (
+ serv.do_glusterTaskActionPerform,
+ ('taskId=<task_id> action=<action>\n\t'
+ '<task_id> is running task id\n\t'
+ '<action> is task action to be performed',
+ 'perform action on gluster task'
)), }
--
To view, visit
http://gerrit.ovirt.org/10200
To unsubscribe, visit
http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I154df353bc6f23001d7bf61b8f5345abd2019cb6
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Bala.FA <barumuga(a)redhat.com>
Gerrit-Reviewer: Ayal Baron <abaron(a)redhat.com>
Gerrit-Reviewer: Dan Kenigsberg <danken(a)redhat.com>
Gerrit-Reviewer: Federico Simoncelli <fsimonce(a)redhat.com>
Gerrit-Reviewer: Saggi Mizrahi <smizrahi(a)redhat.com>
Gerrit-Reviewer: Timothy Asir <tjeyasin(a)redhat.com>