Hello Ayal Baron, Timothy Asir, Saggi Mizrahi, Federico Simoncelli, Dan Kenigsberg,
I'd like you to do a code review. Please visit
http://gerrit.ovirt.org/7579 to review the following change.
Change subject: Added gluster tag support in getAllTasks() ......................................................................
Added gluster tag support in getAllTasks()
If the tag parameter is empty, all tasks (including gluster tasks) are returned; otherwise, only tasks whose tags are in the given tag list are returned.
As the verbs below are not consumed by engine/RHS-C yet, it's OK to defer the compatibility concern for now: glusterVolumeRebalanceStart glusterVolumeRebalanceStatus glusterVolumeReplaceBrickStart glusterVolumeReplaceBrickStatus glusterVolumeRemoveBrickStart glusterVolumeRemoveBrickStatus
Change-Id: I9c765cbfebb5ba22f0d21efa04c824ea4daf6432 Signed-off-by: Bala.FA barumuga@redhat.com --- M tests/gluster_cli_tests.py M vdsm/gluster/cli.py M vdsm/gluster/exception.py M vdsm/storage/taskManager.py 4 files changed, 367 insertions(+), 95 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/79/7579/1
diff --git a/tests/gluster_cli_tests.py b/tests/gluster_cli_tests.py index f442893..9c6357c 100644 --- a/tests/gluster_cli_tests.py +++ b/tests/gluster_cli_tests.py @@ -28,6 +28,7 @@ from gluster import cli as gcli except ImportError: pass +import xml.etree.cElementTree as etree
class GlusterCliTests(TestCaseBase): @@ -115,3 +116,74 @@ def test_parsePeerStatus(self): self._parsePeerStatus_empty_test() self._parsePeerStatus_test() + + def _parseVolumeStatusAll_test(self): + out = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?> +<cliOutput> + <opRet>0</opRet> + <opErrno>0</opErrno> + <opErrstr></opErrstr> + <volumes> + <volume> + <name>V1</name> + <id>03eace73-9197-49d0-a877-831bc6e9dac2</id> + <tasks> + <task> + <name>rebalance</name> + <id>12345473-9197-49d0-a877-831bc6e9dac2</id> + </task> + </tasks> + </volume> + <volume> + <name>V2</name> + <id>03eace73-1237-49d0-a877-831bc6e9dac2</id> + <tasks> + <task> + <name>replace-brick</name> + <id>12345473-1237-49d0-a877-831bc6e9dac2</id> + <sourceBrick>192.168.122.167:/tmp/V2-b1</sourceBrick> + <destBrick>192.168.122.168:/tmp/V2-b1</destBrick> + </task> + </tasks> + </volume> + <volume> + <name>V3</name> + <id>03eace73-1237-1230-a877-831bc6e9dac2</id> + <tasks> + <task> + <name>remove-brick</name> + <id>12345473-1237-1230-a877-831bc6e9dac2</id> + <BrickCount>2</BrickCount> + <brick>192.168.122.167:/tmp/V3-b1</brick> + <brick>192.168.122.168:/tmp/V3-b1</brick> + </task> + </tasks> + </volume> + </volumes> +</cliOutput>""" + tree = etree.fromstring(out) + status = gcli._parseVolumeStatusAll(tree) + self.assertEquals(status, + {'12345473-1237-1230-a877-831bc6e9dac2': + {'bricks': ['192.168.122.167:/tmp/V3-b1', + '192.168.122.168:/tmp/V3-b1'], + 'taskType': 'remove-brick', + 'volumeId': + '03eace73-1237-1230-a877-831bc6e9dac2', + 'volumeName': 'V3'}, + '12345473-1237-49d0-a877-831bc6e9dac2': + {'bricks': ['192.168.122.167:/tmp/V2-b1', + '192.168.122.168:/tmp/V2-b1'], + 'taskType': 'replace-brick', + 'volumeId': + '03eace73-1237-49d0-a877-831bc6e9dac2', + 'volumeName': 'V2'}, + '12345473-9197-49d0-a877-831bc6e9dac2': + {'bricks': [], + 'taskType': 'rebalance', + 'volumeId': + '03eace73-9197-49d0-a877-831bc6e9dac2', + 'volumeName': 'V1'}}) + + def test_parseVolumeStatusAll(self): + 
self._parseVolumeStatusAll_test() diff --git a/vdsm/gluster/cli.py b/vdsm/gluster/cli.py index 95de106..1f464f6 100644 --- a/vdsm/gluster/cli.py +++ b/vdsm/gluster/cli.py @@ -84,6 +84,55 @@ raise ge.GlusterCmdFailedException(rc=rv, err=[msg])
+class TaskType: + REBALANCE = 'rebalance' + REPLACE_BRICK = 'replace-brick' + REMOVE_BRICK = 'remove-brick' + + +def _parseVolumeStatusAll(tree): + """ + returns {TaskId: {'volumeName': VolumeName, + 'volumeId': VolumeId, + 'taskType': TaskType, + 'bricks': BrickList}, ...} + """ + tasks = {} + for el in tree.findall('volumes/volume'): + volumeName = el.find('name').text + volumeId = el.find('id').text + for c in el.findall('tasks/task'): + taskType = c.find('name').text + taskId = c.find('id').text + bricks = [] + if taskType == TaskType.REPLACE_BRICK: + bricks.append(c.find('sourceBrick').text) + bricks.append(c.find('destBrick').text) + elif taskType == TaskType.REMOVE_BRICK: + for b in c.findall('brick'): + bricks.append(b.text) + elif taskType == TaskType.REBALANCE: + pass + tasks[taskId] = {'volumeName': volumeName, + 'volumeId': volumeId, + 'taskType': taskType, + 'bricks': bricks} + return tasks + + +@exportToSuperVdsm +def volumeStatusAll(): + command = _getGlusterVolCmd() + ["status", "all"] + try: + xmltree, out = _execGlusterXml(command) + except ge.GlusterCmdFailedException, e: + raise ge.GlusterVolumeStatusAllFailedException(rc=e.rc, err=e.err) + try: + return _parseVolumeStatusAll(xmltree) + except: + raise ge.GlusterXmlErrorException(err=out) + + def _parseVolumeInfo(out): if not out[0].strip(): del out[0] @@ -300,11 +349,15 @@ command.append("start") if force: command.append("force") - rc, out, err = _execGluster(command) - if rc: - raise ge.GlusterVolumeRebalanceStartFailedException(rc, out, err) - else: - return True + try: + xmltree, out = _execGlusterXml(command) + except ge.GlusterCmdFailedException, e: + raise ge.GlusterVolumeRebalanceStartFailedException(rc=e.rc, + err=e.err) + try: + return {'taskId': xmltree.find('id').text} + except: + raise ge.GlusterXmlErrorException(err=out)
@exportToSuperVdsm @@ -312,84 +365,147 @@ command = _getGlusterVolCmd() + ["rebalance", volumeName, "stop"] if force: command.append('force') - rc, out, err = _execGluster(command) - if rc: - raise ge.GlusterVolumeRebalanceStopFailedException(rc, out, err) - else: + try: + _execGlusterXml(command) return True + except ge.GlusterCmdFailedException, e: + raise ge.GlusterVolumeRebalanceStopFailedException(rc=e.rc, + err=e.err) + + +class TaskStatus(): + RUNNING = 'RUNNING' + FAILED = 'FAILED' + COMPLETED = 'COMPLETED' + + +def _parseVolumeRebalanceRemoveBrickStatus(xmltree, mode): + """ + returns {'taskId': UUID, + 'host': [{'name': NAME, + 'id': HOSTID, + 'filesScanned': INT, + 'filesMoved': INT, + 'filesFailed': INT, + 'totalSizeMoved': INT, + 'status': TaskStatus},...] + 'summary': {'filesScanned': INT, + 'filesMoved': INT, + 'filesFailed': INT, + 'totalSizeMoved': INT, + 'status': TaskStatus}} + """ + if mode == 'rebalance': + tree = xmltree.find('volRebalance') + elif mode == 'remove-brick': + tree = xmltree.find('volRemoveBrick') + else: + return + status = \ + {'taskId': tree.find('id').text, + 'summary': \ + {'filesScanned': int(tree.find('summary/filesScanned').text), + 'filesMoved': int(tree.find('summary/filesMoved').text), + 'filesFailed': int(tree.find('summary/filesFailed').text), + 'totalSizeMoved': int(tree.find('summary/totalSizeMoved').text), + 'status': tree.find('summary/status').text}, + 'host': []} + for el in tree.findall('node'): + status['host'].append({'name': el.find('name').text, + 'id': el.find('id').text, + 'filesScanned': + int(el.find('filesScanned').text), + 'filesMoved': int(el.find('filesMoved').text), + 'filesFailed': int(el.find('filesFailed').text), + 'totalSizeMoved': + int(el.find('totalSizeMoved').text), + 'status': el.find('status').text}) + return status + + +def _parseVolumeRebalanceStatus(tree): + return _parseVolumeRebalanceRemoveBrickStatus(tree, 'rebalance')
@exportToSuperVdsm def volumeRebalanceStatus(volumeName): - rc, out, err = _execGluster(_getGlusterVolCmd() + ["rebalance", volumeName, - "status"]) - if rc: - raise ge.GlusterVolumeRebalanceStatusFailedException(rc, out, err) - if 'in progress' in out[0]: - return BrickStatus.RUNNING, "\n".join(out) - elif 'complete' in out[0]: - return BrickStatus.COMPLETED, "\n".join(out) - else: - return BrickStatus.UNKNOWN, "\n".join(out) + command = _getGlusterVolCmd() + ["rebalance", volumeName, "status"] + try: + xmltree, out = _execGlusterXml(command) + except ge.GlusterCmdFailedException, e: + raise ge.GlusterVolumeRebalanceStatusFailedException(rc=e.rc, + err=e.err) + try: + return _parseVolumeRebalanceStatus(xmltree) + except: + raise ge.GlusterXmlErrorException(err=out)
@exportToSuperVdsm def volumeReplaceBrickStart(volumeName, existingBrick, newBrick): - rc, out, err = _execGluster(_getGlusterVolCmd() + ["replace-brick", - volumeName, - existingBrick, newBrick, - "start"]) - if rc: - raise ge.GlusterVolumeReplaceBrickStartFailedException(rc, out, err) - else: - return True + command = _getGlusterVolCmd() + ["replace-brick", volumeName, + existingBrick, newBrick, "start"] + try: + xmltree, out = _execGlusterXml(command) + except ge.GlusterCmdFailedException, e: + raise ge.GlusterVolumeReplaceBrickStartFailedException(rc=e.rc, + err=e.err) + try: + return {'taskId': xmltree.find('id').text} + except: + raise ge.GlusterXmlErrorException(err=out)
@exportToSuperVdsm def volumeReplaceBrickAbort(volumeName, existingBrick, newBrick): - rc, out, err = _execGluster(_getGlusterVolCmd() + ["replace-brick", - volumeName, - existingBrick, newBrick, - "abort"]) - if rc: - raise ge.GlusterVolumeReplaceBrickAbortFailedException(rc, out, err) - else: + command = _getGlusterVolCmd() + ["replace-brick", volumeName, + existingBrick, newBrick, "abort"] + try: + _execGlusterXml(command) return True + except ge.GlusterCmdFailedException, e: + raise ge.GlusterVolumeReplaceBrickAbortFailedException(rc=e.rc, + err=e.err)
@exportToSuperVdsm def volumeReplaceBrickPause(volumeName, existingBrick, newBrick): - rc, out, err = _execGluster(_getGlusterVolCmd() + ["replace-brick", - volumeName, - existingBrick, newBrick, - "pause"]) - if rc: - raise ge.GlusterVolumeReplaceBrickPauseFailedException(rc, out, err) - else: + command = _getGlusterVolCmd() + ["replace-brick", volumeName, + existingBrick, newBrick, "pause"] + try: + _execGlusterXml(command) return True + except ge.GlusterCmdFailedException, e: + raise ge.GlusterVolumeReplaceBrickPauseFailedException(rc=e.rc, + err=e.err) + + +def _parseVolumeReplaceBrickStatus(tree): + """ + returns {'taskId': UUID, + 'filesMoved': INT, + 'movingFile': STRING, + 'status': TaskStatus}} + """ + return {'taskId': tree.find('volReplaceBrick/id').text, + 'filesMoved': int(tree.find('volReplaceBrick/filesMoved').text), + 'movingFile': tree.find('volReplaceBrick/movingFile').text, + 'status': tree.find('volReplaceBrick/status').text}
@exportToSuperVdsm def volumeReplaceBrickStatus(volumeName, existingBrick, newBrick): - rc, out, err = _execGluster(_getGlusterVolCmd() + ["replace-brick", - volumeName, - existingBrick, newBrick, - "status"]) - if rc: - raise ge.GlusterVolumeReplaceBrickStatusFailedException(rc, out, - err) - message = "\n".join(out) - statLine = out[0].strip().upper() - if BrickStatus.PAUSED in statLine: - return BrickStatus.PAUSED, message - elif statLine.endswith('MIGRATION COMPLETE'): - return BrickStatus.COMPLETED, message - elif statLine.startswith('NUMBER OF FILES MIGRATED'): - return BrickStatus.RUNNING, message - elif statLine.endswith("UNKNOWN"): - return BrickStatus.UNKNOWN, message - else: - return BrickStatus.NA, message + command = _getGlusterVolCmd() + ["replace-brick", volumeName, + existingBrick, newBrick, "status"] + try: + xmltree, out = _execGlusterXml(command) + except ge.GlusterCmdFailedException, e: + raise ge.GlusterVolumeReplaceBrickStatusFailedException(rc=e.rc, + err=e.err) + try: + return _parseVolumeReplaceBrickStatus(xmltree) + except: + raise ge.GlusterXmlErrorException(err=out)
@exportToSuperVdsm @@ -399,12 +515,12 @@ existingBrick, newBrick, "commit"] if force: command.append('force') - rc, out, err = _execGluster(command) - if rc: - raise ge.GlusterVolumeReplaceBrickCommitFailedException(rc, out, - err) - else: + try: + _execGlusterXml(command) return True + except ge.GlusterCmdFailedException, e: + raise ge.GlusterVolumeReplaceBrickCommitFailedException(rc=e.rc, + err=e.err)
@exportToSuperVdsm @@ -413,12 +529,15 @@ if replicaCount: command += ["replica", "%s" % replicaCount] command += brickList + ["start"] - - rc, out, err = _execGluster(command) - if rc: - raise ge.GlusterVolumeRemoveBrickStartFailedException(rc, out, err) - else: - return True + try: + xmltree, out = _execGlusterXml(command) + except ge.GlusterCmdFailedException, e: + raise ge.GlusterVolumeRemoveBrickStartFailedException(rc=e.rc, + err=e.err) + try: + return {'taskId': xmltree.find('id').text} + except: + raise ge.GlusterXmlErrorException(err=out)
@exportToSuperVdsm @@ -427,12 +546,16 @@ if replicaCount: command += ["replica", "%s" % replicaCount] command += brickList + ["stop"] - rc, out, err = _execGluster(command) - - if rc: - raise ge.GlusterVolumeRemoveBrickStopFailedException(rc, out, err) - else: + try: + _execGlusterXml(command) return True + except ge.GlusterCmdFailedException, e: + raise ge.GlusterVolumeRemoveBrickStopFailedException(rc=e.rc, + err=e.err) + + +def _parseVolumeRemoveBrickStatus(tree): + return _parseVolumeRebalanceRemoveBrickStatus(tree, 'remove-brick')
@exportToSuperVdsm @@ -441,12 +564,15 @@ if replicaCount: command += ["replica", "%s" % replicaCount] command += brickList + ["status"] - rc, out, err = _execGluster(command) - - if rc: - raise ge.GlusterVolumeRemoveBrickStatusFailedException(rc, out, err) - else: - return "\n".join(out) + try: + xmltree, out = _execGlusterXml(command) + except ge.GlusterCmdFailedException, e: + raise ge.GlusterVolumeRemoveBrickStatusFailedException(rc=e.rc, + err=e.err) + try: + return _parseVolumeRemoveBrickStatus(xmltree) + except: + raise ge.GlusterXmlErrorException(err=out)
@exportToSuperVdsm @@ -455,12 +581,12 @@ if replicaCount: command += ["replica", "%s" % replicaCount] command += brickList + ["commit"] - rc, out, err = _execGluster(command) - - if rc: - raise ge.GlusterVolumeRemoveBrickCommitFailedException(rc, out, err) - else: + try: + _execGlusterXml(command) return True + except ge.GlusterCmdFailedException, e: + raise ge.GlusterVolumeRemoveBrickCommitFailedException(rc=e.rc, + err=e.err)
@exportToSuperVdsm @@ -469,12 +595,12 @@ if replicaCount: command += ["replica", "%s" % replicaCount] command += brickList + ["force"] - rc, out, err = _execGluster(command) - - if rc: - raise ge.GlusterVolumeRemoveBrickForceFailedException(rc, out, err) - else: + try: + _execGlusterXml(command) return True + except ge.GlusterCmdFailedException, e: + raise ge.GlusterVolumeRemoveBrickForceFailedException(rc=e.rc, + err=e.err)
@exportToSuperVdsm diff --git a/vdsm/gluster/exception.py b/vdsm/gluster/exception.py index f4f497b..f209885 100644 --- a/vdsm/gluster/exception.py +++ b/vdsm/gluster/exception.py @@ -323,6 +323,11 @@ message = "Volume remove brick force failed"
+class GlusterVolumeStatusAllFailedException(GlusterVolumeException): + code = 4158 + message = "Volume status all failed" + + # Host class GlusterHostException(GlusterException): code = 4400 diff --git a/vdsm/storage/taskManager.py b/vdsm/storage/taskManager.py index 3bc12f3..0a269cd 100644 --- a/vdsm/storage/taskManager.py +++ b/vdsm/storage/taskManager.py @@ -25,6 +25,12 @@ import storage_exception as se from task import Task, Job, TaskCleanType from threadPool import ThreadPool +try: + from gluster import cli as gcli + from gluster import exception as ge + _glusterEnabled = True +except ImportError: + _glusterEnabled = False
class TaskManager: @@ -113,19 +119,82 @@ self.log.debug("Return: %s", subRes) return subRes
- def getAllTasks(self): + def _getAllGlusterTasks(self): """ - Return Tasks for all public tasks. + Return all gluster tasks + """ + subRes = {} + if not _glusterEnabled: + return subRes + + for taskId, value in gcli.volumeStatusAll(): + msg = '' + state = '' + try: + if value['taskType'] == gcli.TaskType.REBALANCE: + status = gcli.volumeRebalanceStatus(value['volumeName']) + msg = ('Files [scanned: %d, moved: %d, failed: %d], ' + 'Total size moved: %d') % \ + (status['summary']['filesScanned'], + status['summary']['filesMoved'], + status['summary']['filesFailed'], + status['summary']['totalSizeMoved']) + state = status['summary']['status'] + elif value['taskType'] == gcli.TaskType.REMOVE_BRICK: + status = gcli.volumeRemoveBrickStatus(value['volumeName'], + value['bricks']) + msg = ('Files [scanned: %d, moved: %d, failed: %d], ' + 'Total size moved: %d') % \ + (status['summary']['filesScanned'], + status['summary']['filesMoved'], + status['summary']['filesFailed'], + status['summary']['totalSizeMoved']) + state = status['summary']['status'] + elif value['taskType'] == gcli.TaskType.REPLACE_BRICK: + status = gcli.volumeReplaceBrickStatus(value['volumeName'], + value['bricks'][0], + value['bricks'][1]) + msg = 'Files moved: %d, Moving file: %s' % \ + (status['filesMoved'], status['movingFile']) + state = status['status'] + except ge.GlusterException: + self.log.error("gluster exception occured", exc_info=True) + + subRes[taskId] = {"id": taskId, + "verb": value['volumeName'], + "state": state, + "code": value['taskType'], + "message": msg, + "result": '', + "tag": 'gluster'} + return subRes + + def getAllTasks(self, tag=[]): + """ + Return Tasks for all public tasks if param tag is empty, + else return tasks those tags are in param tag. 
""" self.log.debug("Entry.") subRes = {} for taskID, task in self._tasks.items(): try: - subRes[taskID] = task.getDetails() + if not tag: + subRes[taskID] = task.getDetails() + elif task.getTags() in tag: + subRes[taskID] = task.getDetails() except se.UnknownTask: # Return info for existing tasks only. self.log.warn("Unknown task %s. Maybe task was already " "cleared.", taskID) + + try: + if not tag: + subRes.update(self._getAllGlusterTasks()) + elif 'gluster' in tag: + subRes.update(self._getAllGlusterTasks()) + except ge.GlusterException: + self.log.error("gluster exception occured", exc_info=True) + self.log.debug("Return: %s", subRes) return subRes
-- To view, visit http://gerrit.ovirt.org/7579 To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange Gerrit-Change-Id: I9c765cbfebb5ba22f0d21efa04c824ea4daf6432 Gerrit-PatchSet: 1 Gerrit-Project: vdsm Gerrit-Branch: master Gerrit-Owner: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Ayal Baron abaron@redhat.com Gerrit-Reviewer: Dan Kenigsberg danken@redhat.com Gerrit-Reviewer: Federico Simoncelli fsimonce@redhat.com Gerrit-Reviewer: Saggi Mizrahi smizrahi@redhat.com Gerrit-Reviewer: Timothy Asir tjeyasin@redhat.com
oVirt Jenkins CI Server has posted comments on this change.
Change subject: Added gluster tag support in getAllTasks() ......................................................................
Patch Set 1:
Build Successful
http://jenkins.ovirt.info/job/patch_vdsm_unit_tests/750/ : SUCCESS
-- To view, visit http://gerrit.ovirt.org/7579 To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: comment Gerrit-Change-Id: I9c765cbfebb5ba22f0d21efa04c824ea4daf6432 Gerrit-PatchSet: 1 Gerrit-Project: vdsm Gerrit-Branch: master Gerrit-Owner: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Ayal Baron abaron@redhat.com Gerrit-Reviewer: Dan Kenigsberg danken@redhat.com Gerrit-Reviewer: Federico Simoncelli fsimonce@redhat.com Gerrit-Reviewer: Saggi Mizrahi smizrahi@redhat.com Gerrit-Reviewer: Timothy Asir tjeyasin@redhat.com Gerrit-Reviewer: oVirt Jenkins CI Server
oVirt Jenkins CI Server has posted comments on this change.
Change subject: Added gluster tag support in getAllTasks() ......................................................................
Patch Set 2:
Build Successful
http://jenkins.ovirt.info/job/patch_vdsm_unit_tests/791/ : SUCCESS
-- To view, visit http://gerrit.ovirt.org/7579 To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: comment Gerrit-Change-Id: I9c765cbfebb5ba22f0d21efa04c824ea4daf6432 Gerrit-PatchSet: 2 Gerrit-Project: vdsm Gerrit-Branch: master Gerrit-Owner: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Ayal Baron abaron@redhat.com Gerrit-Reviewer: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Dan Kenigsberg danken@redhat.com Gerrit-Reviewer: Federico Simoncelli fsimonce@redhat.com Gerrit-Reviewer: Saggi Mizrahi smizrahi@redhat.com Gerrit-Reviewer: Timothy Asir tjeyasin@redhat.com Gerrit-Reviewer: oVirt Jenkins CI Server
Adam Litke has posted comments on this change.
Change subject: Added gluster tag support in getAllTasks() ......................................................................
Patch Set 2: I would prefer that you didn't submit this
(2 inline comments)
.................................................... File vdsm/storage/taskManager.py Line 157: msg = 'Files moved: %d, Moving file: %s' % \ Line 158: (status['filesMoved'], status['movingFile']) Line 159: state = status['status'] Line 160: except ge.GlusterException: Line 161: self.log.error("gluster exception occured", exc_info=True) The above seems to me like a layering violation. You are adding a lot of gluster-specific logic to this file. The task manager should remain a general-purpose task manager. Can you find a way to move this out into the gluster code please? Line 162: Line 163: subRes[taskId] = {"id": taskId, Line 164: "verb": value['volumeName'], Line 165: "state": state,
Line 193: elif 'gluster' in tag: Line 194: subRes.update(self._getAllGlusterTasks()) Line 195: except ge.GlusterException: Line 196: self.log.error("gluster exception occured", exc_info=True) Line 197: Again, there shouldn't be tag-specific logic in this file. I don't mind filtering tasks by tag in here but we should not be treating tasks differently depending on their tag. Line 198: self.log.debug("Return: %s", subRes) Line 199: return subRes Line 200: Line 201: def unloadTasks(self, tag=None):
-- To view, visit http://gerrit.ovirt.org/7579 To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: comment Gerrit-Change-Id: I9c765cbfebb5ba22f0d21efa04c824ea4daf6432 Gerrit-PatchSet: 2 Gerrit-Project: vdsm Gerrit-Branch: master Gerrit-Owner: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Adam Litke agl@us.ibm.com Gerrit-Reviewer: Ayal Baron abaron@redhat.com Gerrit-Reviewer: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Dan Kenigsberg danken@redhat.com Gerrit-Reviewer: Federico Simoncelli fsimonce@redhat.com Gerrit-Reviewer: Saggi Mizrahi smizrahi@redhat.com Gerrit-Reviewer: Timothy Asir tjeyasin@redhat.com Gerrit-Reviewer: oVirt Jenkins CI Server
Bala.FA has posted comments on this change.
Change subject: Added gluster tag support in getAllTasks() ......................................................................
Patch Set 2: (2 inline comments)
.................................................... File vdsm/storage/taskManager.py Line 157: msg = 'Files moved: %d, Moving file: %s' % \ Line 158: (status['filesMoved'], status['movingFile']) Line 159: state = status['status'] Line 160: except ge.GlusterException: Line 161: self.log.error("gluster exception occured", exc_info=True) Done Line 162: Line 163: subRes[taskId] = {"id": taskId, Line 164: "verb": value['volumeName'], Line 165: "state": state,
Line 193: elif 'gluster' in tag: Line 194: subRes.update(self._getAllGlusterTasks()) Line 195: except ge.GlusterException: Line 196: self.log.error("gluster exception occured", exc_info=True) Line 197: Gluster tasks don't fit in current task framework and they are managed by gluster itself. We need a way to expose them into engine so that they will be treated normally like other tasks.
Based on this requirement, the current way of exposing gluster tasks is good, I think. Line 198: self.log.debug("Return: %s", subRes) Line 199: return subRes Line 200: Line 201: def unloadTasks(self, tag=None):
-- To view, visit http://gerrit.ovirt.org/7579 To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: comment Gerrit-Change-Id: I9c765cbfebb5ba22f0d21efa04c824ea4daf6432 Gerrit-PatchSet: 2 Gerrit-Project: vdsm Gerrit-Branch: master Gerrit-Owner: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Adam Litke agl@us.ibm.com Gerrit-Reviewer: Ayal Baron abaron@redhat.com Gerrit-Reviewer: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Dan Kenigsberg danken@redhat.com Gerrit-Reviewer: Federico Simoncelli fsimonce@redhat.com Gerrit-Reviewer: Saggi Mizrahi smizrahi@redhat.com Gerrit-Reviewer: Timothy Asir tjeyasin@redhat.com Gerrit-Reviewer: oVirt Jenkins CI Server
Adam Litke has posted comments on this change.
Change subject: Added gluster tag support in getAllTasks() ......................................................................
Patch Set 2: (1 inline comment)
.................................................... File vdsm/storage/taskManager.py Line 193: elif 'gluster' in tag: Line 194: subRes.update(self._getAllGlusterTasks()) Line 195: except ge.GlusterException: Line 196: self.log.error("gluster exception occured", exc_info=True) Line 197: I wouldn't say it's good. It's really just a hack. Rather than continuing to add special case code all over taskManager for gluster tasks we should fix it properly. It's only going to get harder the longer we keep adding band-aids. Line 198: self.log.debug("Return: %s", subRes) Line 199: return subRes Line 200: Line 201: def unloadTasks(self, tag=None):
-- To view, visit http://gerrit.ovirt.org/7579 To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: comment Gerrit-Change-Id: I9c765cbfebb5ba22f0d21efa04c824ea4daf6432 Gerrit-PatchSet: 2 Gerrit-Project: vdsm Gerrit-Branch: master Gerrit-Owner: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Adam Litke agl@us.ibm.com Gerrit-Reviewer: Ayal Baron abaron@redhat.com Gerrit-Reviewer: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Dan Kenigsberg danken@redhat.com Gerrit-Reviewer: Federico Simoncelli fsimonce@redhat.com Gerrit-Reviewer: Saggi Mizrahi smizrahi@redhat.com Gerrit-Reviewer: Timothy Asir tjeyasin@redhat.com Gerrit-Reviewer: oVirt Jenkins CI Server
Shireesh Anjal has posted comments on this change.
Change subject: Added gluster tag support in getAllTasks() ......................................................................
Patch Set 2: (1 inline comment)
.................................................... File vdsm/storage/taskManager.py Line 193: elif 'gluster' in tag: Line 194: subRes.update(self._getAllGlusterTasks()) Line 195: except ge.GlusterException: Line 196: self.log.error("gluster exception occured", exc_info=True) Line 197: To elaborate a little on Bala's statement, gluster tasks are a bit different in the sense they can be started on any server of the cluster, and later their status can be checked on any other server of the cluster, using the gluster CLI. So we can't rely on self._tasks for gluster tasks, when returning task statuses. This, I believe is unique to gluster and probably deserves to be handled in a special way.
Having said this, please do suggest if you have a better way of doing this, and I'm sure Bala will be more than happy to implement it :) Line 198: self.log.debug("Return: %s", subRes) Line 199: return subRes Line 200: Line 201: def unloadTasks(self, tag=None):
-- To view, visit http://gerrit.ovirt.org/7579 To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: comment Gerrit-Change-Id: I9c765cbfebb5ba22f0d21efa04c824ea4daf6432 Gerrit-PatchSet: 2 Gerrit-Project: vdsm Gerrit-Branch: master Gerrit-Owner: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Adam Litke agl@us.ibm.com Gerrit-Reviewer: Ayal Baron abaron@redhat.com Gerrit-Reviewer: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Dan Kenigsberg danken@redhat.com Gerrit-Reviewer: Federico Simoncelli fsimonce@redhat.com Gerrit-Reviewer: Saggi Mizrahi smizrahi@redhat.com Gerrit-Reviewer: Shireesh Anjal sanjal@redhat.com Gerrit-Reviewer: Timothy Asir tjeyasin@redhat.com Gerrit-Reviewer: oVirt Jenkins CI Server
Adam Litke has posted comments on this change.
Change subject: Added gluster tag support in getAllTasks() ......................................................................
Patch Set 3: I would prefer that you didn't submit this
(2 inline comments)
.................................................... File vdsm/storage/taskManager.py Line 24 Line 25 Line 26 Line 27 Line 28 I still do not like this scope creep into taskManager. If you need custom logic for gluster tasks then that should be moved into a separate file 'glusterTasks.py' with an appropriate interface. All of this glusterEnabled cruft should be hidden inside that gluster-specific module.
Line 144: elif 'gluster' in tag: Line 145: subRes.update(GlusterApi.getTasks()) Line 146: except ge.GlusterException: Line 147: self.log.error("gluster exception occured", exc_info=True) Line 148: Once you have a glusterTasks.py you can:
gTasks = glusterTasks.getTasks(tag) subres.update(gTasks)
This module should not be dealing with whether or not gluster is enabled. It should also not be directly handling GlusterException Line 149: self.log.debug("Return: %s", subRes) Line 150: return subRes Line 151: Line 152: def unloadTasks(self, tag=None):
-- To view, visit http://gerrit.ovirt.org/7579 To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: comment Gerrit-Change-Id: I9c765cbfebb5ba22f0d21efa04c824ea4daf6432 Gerrit-PatchSet: 3 Gerrit-Project: vdsm Gerrit-Branch: master Gerrit-Owner: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Adam Litke agl@us.ibm.com Gerrit-Reviewer: Ayal Baron abaron@redhat.com Gerrit-Reviewer: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Dan Kenigsberg danken@redhat.com Gerrit-Reviewer: Federico Simoncelli fsimonce@redhat.com Gerrit-Reviewer: Saggi Mizrahi smizrahi@redhat.com Gerrit-Reviewer: Shireesh Anjal sanjal@redhat.com Gerrit-Reviewer: Timothy Asir tjeyasin@redhat.com Gerrit-Reviewer: oVirt Jenkins CI Server
Bala.FA has posted comments on this change.
Change subject: Added gluster tag support in getAllTasks() ......................................................................
Patch Set 3: (2 inline comments)
.................................................... File vdsm/storage/taskManager.py Line 24 Line 25 Line 26 Line 27 Line 28 Done
Line 144: elif 'gluster' in tag: Line 145: subRes.update(GlusterApi.getTasks()) Line 146: except ge.GlusterException: Line 147: self.log.error("gluster exception occured", exc_info=True) Line 148: Done Line 149: self.log.debug("Return: %s", subRes) Line 150: return subRes Line 151: Line 152: def unloadTasks(self, tag=None):
-- To view, visit http://gerrit.ovirt.org/7579 To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: comment Gerrit-Change-Id: I9c765cbfebb5ba22f0d21efa04c824ea4daf6432 Gerrit-PatchSet: 3 Gerrit-Project: vdsm Gerrit-Branch: master Gerrit-Owner: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Adam Litke agl@us.ibm.com Gerrit-Reviewer: Ayal Baron abaron@redhat.com Gerrit-Reviewer: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Dan Kenigsberg danken@redhat.com Gerrit-Reviewer: Federico Simoncelli fsimonce@redhat.com Gerrit-Reviewer: Saggi Mizrahi smizrahi@redhat.com Gerrit-Reviewer: Shireesh Anjal sanjal@redhat.com Gerrit-Reviewer: Timothy Asir tjeyasin@redhat.com Gerrit-Reviewer: oVirt Jenkins CI Server
Adam Litke has posted comments on this change.
Change subject: Added gluster tag support in getAllTasks() ......................................................................
Patch Set 4: (2 inline comments)
Almost there. Just want to ask Dan to review...
.................................................... File vdsm/storage/taskManager.py Line 113: taskID) Line 114: self.log.debug("Return: %s", subRes) Line 115: return subRes Line 116: Line 117: def getAllTasks(self, tag=[]): Hopefully Dan can weigh in on this. I don't want to mis-characterize his views on using an array for the tag argument. If we keep the array, please change tag to tags to convey the plurality. Line 118: """ Line 119: Return Tasks for all public tasks if param tag is empty, Line 120: else return tasks those tags are in param tag. Line 121: """
Line 132: self.log.warn("Unknown task %s. Maybe task was already " Line 133: "cleared.", taskID) Line 134: Line 135: subRes.update(glusterTasks.getTasks(tag)) Line 136: Thanks. This is much better. Line 137: self.log.debug("Return: %s", subRes) Line 138: return subRes Line 139: Line 140: def unloadTasks(self, tag=None):
-- To view, visit http://gerrit.ovirt.org/7579 To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: comment Gerrit-Change-Id: I9c765cbfebb5ba22f0d21efa04c824ea4daf6432 Gerrit-PatchSet: 4 Gerrit-Project: vdsm Gerrit-Branch: master Gerrit-Owner: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Adam Litke agl@us.ibm.com Gerrit-Reviewer: Ayal Baron abaron@redhat.com Gerrit-Reviewer: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Dan Kenigsberg danken@redhat.com Gerrit-Reviewer: Federico Simoncelli fsimonce@redhat.com Gerrit-Reviewer: Saggi Mizrahi smizrahi@redhat.com Gerrit-Reviewer: Shireesh Anjal sanjal@redhat.com Gerrit-Reviewer: Timothy Asir tjeyasin@redhat.com Gerrit-Reviewer: oVirt Jenkins CI Server
Dan Kenigsberg has posted comments on this change.
Change subject: Added gluster tag support in getAllTasks() ......................................................................
Patch Set 4: (1 inline comment)
.................................................... File vdsm/storage/taskManager.py Line 113: taskID) Line 114: self.log.debug("Return: %s", subRes) Line 115: return subRes Line 116: Line 117: def getAllTasks(self, tag=[]): I'm all for having a set of tags. However, the current code is a mess, since it stores multiple tags as space-separated string, and does an in-string search instead of a proper set membership.
starting to rely on this before fixing it is going to be troublesome. Line 118: """ Line 119: Return Tasks for all public tasks if param tag is empty, Line 120: else return tasks those tags are in param tag. Line 121: """
-- To view, visit http://gerrit.ovirt.org/7579 To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: comment Gerrit-Change-Id: I9c765cbfebb5ba22f0d21efa04c824ea4daf6432 Gerrit-PatchSet: 4 Gerrit-Project: vdsm Gerrit-Branch: master Gerrit-Owner: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Adam Litke agl@us.ibm.com Gerrit-Reviewer: Ayal Baron abaron@redhat.com Gerrit-Reviewer: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Dan Kenigsberg danken@redhat.com Gerrit-Reviewer: Federico Simoncelli fsimonce@redhat.com Gerrit-Reviewer: Saggi Mizrahi smizrahi@redhat.com Gerrit-Reviewer: Shireesh Anjal sanjal@redhat.com Gerrit-Reviewer: Timothy Asir tjeyasin@redhat.com Gerrit-Reviewer: oVirt Jenkins CI Server
Bala.FA has posted comments on this change.
Change subject: Added gluster tag support in getAllTasks() ......................................................................
Patch Set 4: (1 inline comment)
.................................................... File vdsm/storage/taskManager.py Line 113: taskID) Line 114: self.log.debug("Return: %s", subRes) Line 115: return subRes Line 116: Line 117: def getAllTasks(self, tag=[]): There are two ways I see.
task.getTags() can return self.tag.split(KEY_SEPERATOR), or task.tag in the task can be a list containing the tags.
Please suggest which one is better. Line 118: """ Line 119: Return Tasks for all public tasks if param tag is empty, Line 120: else return tasks those tags are in param tag. Line 121: """
-- To view, visit http://gerrit.ovirt.org/7579 To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: comment Gerrit-Change-Id: I9c765cbfebb5ba22f0d21efa04c824ea4daf6432 Gerrit-PatchSet: 4 Gerrit-Project: vdsm Gerrit-Branch: master Gerrit-Owner: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Adam Litke agl@us.ibm.com Gerrit-Reviewer: Ayal Baron abaron@redhat.com Gerrit-Reviewer: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Dan Kenigsberg danken@redhat.com Gerrit-Reviewer: Federico Simoncelli fsimonce@redhat.com Gerrit-Reviewer: Saggi Mizrahi smizrahi@redhat.com Gerrit-Reviewer: Shireesh Anjal sanjal@redhat.com Gerrit-Reviewer: Timothy Asir tjeyasin@redhat.com Gerrit-Reviewer: oVirt Jenkins CI Server
Adam Litke has posted comments on this change.
Change subject: Added gluster tag support in getAllTasks() ......................................................................
Patch Set 4: (1 inline comment)
.................................................... File vdsm/storage/taskManager.py Line 113: taskID) Line 114: self.log.debug("Return: %s", subRes) Line 115: return subRes Line 116: Line 117: def getAllTasks(self, tag=[]): In my opinion we should convert task to represent the tags as a proper list. That is probably more work though and we'd need to make sure that it didn't break any existing functionality.
Given the completely broken behavior of the old API, I doubt that changing tags to a list would break anything though. Line 118: """ Line 119: Return Tasks for all public tasks if param tag is empty, Line 120: else return tasks those tags are in param tag. Line 121: """
-- To view, visit http://gerrit.ovirt.org/7579 To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: comment Gerrit-Change-Id: I9c765cbfebb5ba22f0d21efa04c824ea4daf6432 Gerrit-PatchSet: 4 Gerrit-Project: vdsm Gerrit-Branch: master Gerrit-Owner: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Adam Litke agl@us.ibm.com Gerrit-Reviewer: Ayal Baron abaron@redhat.com Gerrit-Reviewer: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Dan Kenigsberg danken@redhat.com Gerrit-Reviewer: Federico Simoncelli fsimonce@redhat.com Gerrit-Reviewer: Saggi Mizrahi smizrahi@redhat.com Gerrit-Reviewer: Shireesh Anjal sanjal@redhat.com Gerrit-Reviewer: Timothy Asir tjeyasin@redhat.com Gerrit-Reviewer: oVirt Jenkins CI Server
Bala.FA has posted comments on this change.
Change subject: Added gluster tag support in getAllTasks() ......................................................................
Patch Set 4: (1 inline comment)
.................................................... File vdsm/storage/taskManager.py Line 113: taskID) Line 114: self.log.debug("Return: %s", subRes) Line 115: return subRes Line 116: Line 117: def getAllTasks(self, tag=[]): Ok. I will come up with a fix for this. Line 118: """ Line 119: Return Tasks for all public tasks if param tag is empty, Line 120: else return tasks those tags are in param tag. Line 121: """
-- To view, visit http://gerrit.ovirt.org/7579 To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: comment Gerrit-Change-Id: I9c765cbfebb5ba22f0d21efa04c824ea4daf6432 Gerrit-PatchSet: 4 Gerrit-Project: vdsm Gerrit-Branch: master Gerrit-Owner: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Adam Litke agl@us.ibm.com Gerrit-Reviewer: Ayal Baron abaron@redhat.com Gerrit-Reviewer: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Dan Kenigsberg danken@redhat.com Gerrit-Reviewer: Federico Simoncelli fsimonce@redhat.com Gerrit-Reviewer: Saggi Mizrahi smizrahi@redhat.com Gerrit-Reviewer: Shireesh Anjal sanjal@redhat.com Gerrit-Reviewer: Timothy Asir tjeyasin@redhat.com Gerrit-Reviewer: oVirt Jenkins CI Server
Bala.FA has abandoned this change.
Change subject: Added gluster tag support in getAllTasks() ......................................................................
Patch Set 5: Abandoned
This approach is not valid for new design tracked at http://gerrit.ovirt.org/#/c/10200/
-- To view, visit http://gerrit.ovirt.org/7579 To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: abandon Gerrit-Change-Id: I9c765cbfebb5ba22f0d21efa04c824ea4daf6432 Gerrit-PatchSet: 5 Gerrit-Project: vdsm Gerrit-Branch: master Gerrit-Owner: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Adam Litke agl@us.ibm.com Gerrit-Reviewer: Ayal Baron abaron@redhat.com Gerrit-Reviewer: Bala.FA barumuga@redhat.com Gerrit-Reviewer: Dan Kenigsberg danken@redhat.com Gerrit-Reviewer: Federico Simoncelli fsimonce@redhat.com Gerrit-Reviewer: Saggi Mizrahi smizrahi@redhat.com Gerrit-Reviewer: Shireesh Anjal sanjal@redhat.com Gerrit-Reviewer: Timothy Asir tjeyasin@redhat.com Gerrit-Reviewer: Yaniv Bronhaim ybronhei@redhat.com Gerrit-Reviewer: oVirt Jenkins CI Server
Jenkins CI has posted comments on this change.
Change subject: Added gluster tag support in getAllTasks() ......................................................................
Patch Set 4:
Abandoned due to no activity - please restore if still relevant
Jenkins CI RO has posted comments on this change.
Change subject: Added gluster tag support in getAllTasks() ......................................................................
Patch Set 4:
Abandoned due to no activity - please restore if still relevant
vdsm-patches@lists.fedorahosted.org