Ala Hino has uploaded a new change for review.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
gluster: Allow gluster mount with additional servers
Currently, engine supports mounting single gluster server. With this change, mount will include the other two gluster servers defined in the replica. To do so, vdsm uses gluster get info api in order to get the IPs of the other two servers and then, builds mount command using gluster 'backup-volfile-servers' option.
Change-Id: I2478a5edc1fc9d24eb96d64a32a98a2467ce2989 Bug-Url: https://bugzilla.redhat.com/1177777 Signed-off-by: Ala Hino ahino@redhat.com --- A tests/glusterStorageServerTests.py M vdsm/storage/storageServer.py M vdsm/storage/storage_exception.py M vdsm/supervdsmServer 4 files changed, 146 insertions(+), 1 deletion(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/65/40665/1
diff --git a/tests/glusterStorageServerTests.py b/tests/glusterStorageServerTests.py new file mode 100644 index 0000000..8af184e --- /dev/null +++ b/tests/glusterStorageServerTests.py @@ -0,0 +1,35 @@ + +from testlib import VdsmTestCase +from storage.storageServer import GlusterFSConnection + +class GlusterFSConnectionTests(VdsmTestCase): + + def testParsingGlusterPath(self): + gluster = GlusterFSConnection("10.20.30.40:/my_lovely_vol", "") + remotePath = gluster._getGlusterServerAndVolume() + self.assertEquals(remotePath[0], "10.20.30.40") + self.assertEquals(remotePath[1], "my_lovely_vol") + + def testPreparingGlusterVolumeInfoCmd(self): + gluster = GlusterFSConnection("10.20.30.40:/my_lovely_vol", "") + command = gluster._getGlusterVolCmd("10.20.30.40", "my_lovely_vol", "--xml") + self.assertEquals(command, + ['/usr/sbin/gluster', + '--mode=script', + 'volume', + 'info', + 'my_lovely_vol', + '--remote-host=10.20.30.40', + '--xml' + ] + ) + + def testParsingGlusterVolumeInfoXml(self): + xml = r'<?xml version="1.0" encoding="UTF-8" standalone="yes"?><cliOutput><opRet>0</opRet><opErrno>0</opErrno><opErrstr/><volInfo><volumes><volume><name>ala_volume</name><id>7644c01a-72a7-420d-9a08a66002f5c9fc</id><status>1</status><statusStr>Started</statusStr><brickCount>3</brickCount><distCount>1</distCount><stripeCount>1</stripeCount><replicaCount>1</replicaCount><disperseCount>0</disperseCount><redundancyCount>0</redundancyCount><type>0</type><typeStr>Distribute</typeStr><transport>0</transport> <xlators/><bricks><brick uuid="dcf2d5d9-fee8-4dd2-aaf1-ca2021b596c4">10.35.160.202:/home/ala_volume<name>10.35.160.202:/home/ala_volume</name><hostUuid>dcf2d5d9-fee8-4dd2-aaf1-ca2021b596c4</hostUuid></brick><brick uuid="3f208209-dec5-49e2-a3e1-1028dea32cf6">10.35.160.6:/home/ala_volume<name>10.35.160.6:/home/ala_volume</name><hostUuid>3f208209-dec5-49e2-a3e1-1028dea32cf6</hostUuid></brick><brick uuid="b9d8d956-e1ab-4a0a-8ea5-989c4957a9f1">10.35.160.203:/home/ala_volume<! 
name>10.35.160.203:/home/ala_volume</name><hostUuid>b9d8d956-e1ab-4a0a-8ea5-989c4957a9f1</hostUuid></brick></bricks><optCount>0</optCount><options/></volume><count>1</count></volumes></volInfo></cliOutput>' + + gluster = GlusterFSConnection("10.20.30.40:/my_lovely_vol", "") + servers = gluster._getGlusterBackupServersFromVolInfoXml(xml, "10.35.160.202") + self.assertEquals(servers, ["10.35.160.6", "10.35.160.203"]) + + backupOpts = gluster._getGlusterBackupServersOption(servers) + self.assertEquals(backupOpts, "backup-volfile-servers=10.35.160.6:10.35.160.203") diff --git a/vdsm/storage/storageServer.py b/vdsm/storage/storageServer.py index 73ebcfe..14b2039 100644 --- a/vdsm/storage/storageServer.py +++ b/vdsm/storage/storageServer.py @@ -29,10 +29,13 @@ from functools import partial import six import sys +import xml.etree.ElementTree as ET
from vdsm.compat import pickle from vdsm.config import config from vdsm import udevadm +from vdsm import utils +import supervdsm
import mount import fileUtils @@ -41,6 +44,15 @@ from sync import asyncmethod, AsyncCallStub from mount import MountError import storage_exception as se + + +_glusterCommandPath = utils.CommandPath("gluster", "/usr/sbin/gluster") + + +if hasattr(ET, 'ParseError'): + _etreeExceptions = (ET.ParseError, AttributeError, ValueError) +else: + _etreeExceptions = (SyntaxError, AttributeError, ValueError)
class AliasAlreadyRegisteredError(RuntimeError): @@ -260,6 +272,7 @@
class GlusterFSConnection(object): + log = logging.getLogger("Storage.StorageServer.GlusterFSConnection")
def __init__(self, spec, vfsType="glusterfs", options=""): self._vfsType = vfsType @@ -267,10 +280,24 @@ self._options = options
def connect(self): + remotePath = self._getGlusterServerAndVolume() + primaryServer = remotePath[0] + volume = remotePath[1] + self.log.debug("Gluster primary sever: %s Volume: %s", + primaryServer, + volume) + xml = supervdsm.getProxy().getGlusterVolumeInfo(primaryServer, volume, + "--xml") + self.log.debug("Volume info xml: %s", xml) + servers = self._getGlusterBackupServersFromVolInfoXml(xml, + primaryServer) + self.log.debug("Gluster backup servers: %s", servers) + optionsString = self._getGlusterBackupServersOption(servers) + self.log.debug("Gluster options: %s", optionsString) localPath = self._getLocalPath() mountCon = MountConnection(self._remotePath, "glusterfs", - self._options, + optionsString, localPath ) return mountCon.connect() @@ -282,6 +309,7 @@ self._options, localPath ) + return mountCon.isConnected()
def disconnect(self): @@ -299,6 +327,67 @@ "glusterSD" )
+ def _getGlusterServerAndVolume(self): + remotePath = self._remotePath.split(":/") + return (remotePath[0], remotePath[1]) + + def _getGlusterBackupServersFromVolInfoXml(self, xml, primaryServer): + servers = [] + try: + root = ET.fromstring('\n'.join(xml)) + except _etreeExceptions: + root = ET.fromstring(xml) + + for brick in root.iter('brick'): + servers.append(brick.text.split(":/")[0]) + servers.remove(primaryServer) + + return servers + + def _getGlusterBackupServersOption(self, servers): + optionsString = "backup-volfile-servers=%s:%s" % (servers[0], + servers[1]) + + return optionsString + + def _getGlusterVolCmd(self, server, volume, options=None): + command = [_glusterCommandPath.cmd, "--mode=script", "volume"] + command += ["info"] + + if volume: + command.append(volume) + + if server: + command += ['--remote-host=%s' % server] + + if options: + command.append(options) + + self.log.debug("Gluster volume info command: %s", command) + return command + + def _execGlusterXml(self, cmd): + rc, out, err = utils.execCmd(cmd) + if rc != 0: + raise se.GlusterCmdExecFailedException(rc, out, err) + try: + tree = ET.fromstring('\n'.join(out)) + rv = int(tree.find('opRet').text) + msg = tree.find('opErrstr').text + errNo = int(tree.find('opErrno').text) + except _etreeExceptions: + raise se.GlusterXmlErrorException(err=out) + if rv == 0: + return out + else: + if errNo != 0: + rv = errNo + raise se.GlusterCmdFailedException(rc=rv, err=[msg]) + + def _getGlusterVolumeInfo(self, server, volume, options=None): + command = self._getGlusterVolCmd(server, volume, options) + return self._execGlusterXml(command) + def __eq__(self, other): if not isinstance(other, GlusterFSConnection): return False diff --git a/vdsm/storage/storage_exception.py b/vdsm/storage/storage_exception.py index 1cfc8e4..74e9e7b 100644 --- a/vdsm/storage/storage_exception.py +++ b/vdsm/storage/storage_exception.py @@ -1514,6 +1514,21 @@ issue and how to resolve it"""
+class GlusterCmdExecFailedException(StorageException): + code = 615 + message = "Command execution failed" + + +class GlusterXmlErrorException(StorageException): + code = 616 + message = "XML error" + + +class GlusterCmdFailedException(StorageException): + code = 617 + message = "Command failed" + + ################################################# # SPM/HSM Exceptions ################################################# diff --git a/vdsm/supervdsmServer b/vdsm/supervdsmServer index ef7a710..d833032 100755 --- a/vdsm/supervdsmServer +++ b/vdsm/supervdsmServer @@ -32,6 +32,7 @@ import logging import logging.config from vdsm.infra import sigutils +from storage.storageServer import GlusterFSConnection
import numaUtils
@@ -415,6 +416,11 @@ return hba._rescan()
@logDecorator + def getGlusterVolumeInfo(self, server, volume, options=None): + gluster = GlusterFSConnection("", "") + return gluster._getGlusterVolumeInfo(server, volume, options) + + @logDecorator def set_rp_filter_loose(self, dev): sysctl.set_rp_filter_loose(dev)
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 1:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
Nir Soffer has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 1:
Please fix pep8 violations - see http://jenkins.ovirt.org/job/vdsm_master_pep8_gerrit/18717/console
Nir Soffer has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 1:
(22 comments)
General comment - you parse gluster output multiple times for no reason.
You should: - Parse the returned xml once - and return the etree object instead of the string. - Get the data you need from the etree objects
https://gerrit.ovirt.org/#/c/40665/1/tests/glusterStorageServerTests.py File tests/glusterStorageServerTests.py:
Line 23: '--xml' Line 24: ] Line 25: ) Line 26: Line 27: def testParsingGlusterVolumeInfoXml(self): Use
xml = """ <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
</xml> """ Line 28: xml = r'<?xml version="1.0" encoding="UTF-8" standalone="yes"?><cliOutput><opRet>0</opRet><opErrno>0</opErrno><opErrstr/><volInfo><volumes><volume><name>ala_volume</name><id>7644c01a-72a7-420d-9a08a66002f5c9fc</id><status>1</status><statusStr>Started</statusStr><brickCount>3</brickCount><distCount>1</distCount><stripeCount>1</stripeCount><replicaCount>1</replicaCount><disperseCount>0</disperseCount><redundancyCount>0</redundancyCount><type>0</type><typeStr>Distribute</typeStr><transport>0</transport> <xlators/><bricks><brick uuid="dcf2d5d9-fee8-4dd2-aaf1-ca2021b596c4">10.35.160.202:/home/ala_volume<name>10.35.160.202:/home/ala_volume</name><hostUuid>dcf2d5d9-fee8-4dd2-aaf1-ca2021b596c4</hostUuid></brick><brick uuid="3f208209-dec5-49e2-a3e1-1028dea32cf6">10.35.160.6:/home/ala_volume<name>10.35.160.6:/home/ala_volume</name><hostUuid>3f208209-dec5-49e2-a3e1-1028dea32cf6</hostUuid></brick><brick uuid="b9d8d956-e1ab-4a0a-8ea5-989c4957a9f1">10.35.160.203:/home/ala! _volume<name>10.35.160.203:/home/ala_volume</name><hostUuid>b9d8d956-e1ab-4a0a-8ea5-989c4957a9f1</hostUuid></brick></bricks><optCount>0</optCount><options/></volume><count>1</count></volumes></volInfo></cliOutput>' Line 29: Line 30: gluster = GlusterFSConnection("10.20.30.40:/my_lovely_vol", "") Line 31: servers = gluster._getGlusterBackupServersFromVolInfoXml(xml, "10.35.160.202")
https://gerrit.ovirt.org/#/c/40665/1/vdsm/storage/storageServer.py File vdsm/storage/storageServer.py:
Line 258 Line 259 Line 260 Line 261 Line 262 Should be GlusterfsConnection
Line 261 Line 262 Line 263 Line 264 Line 265 Why do you want to support glusterfs with different vfstype? This should be a class constant.
Line 262 Line 263 Line 264 Line 265 Line 266 All names must be lower_case except ClassName and CONSTANTS (pep8)
Line 268 Line 269 Line 270 Line 271 Line 272 You are not using the specified vfsType - use the class constant.
Line 34: from vdsm.compat import pickle Line 35: from vdsm.config import config Line 36: from vdsm import udevadm Line 37: from vdsm import utils Line 38: import supervdsm This should be in the next group of imports, not in the vdsm group (lib/vdsm) Line 39: Line 40: import mount Line 41: import fileUtils Line 42: import fileSD
Line 45: from mount import MountError Line 46: import storage_exception as se Line 47: Line 48: Line 49: _glusterCommandPath = utils.CommandPath("gluster", "/usr/sbin/gluster") This is a constant, use UPPERCASE Line 50: Line 51: Line 52: if hasattr(ET, 'ParseError'): Line 53: _etreeExceptions = (ET.ParseError, AttributeError, ValueError)
Line 51: Line 52: if hasattr(ET, 'ParseError'): Line 53: _etreeExceptions = (ET.ParseError, AttributeError, ValueError) Line 54: else: Line 55: _etreeExceptions = (SyntaxError, AttributeError, ValueError) Why do we need this? we use etree elsewhere and we don't need this. Line 56: Line 57: Line 58: class AliasAlreadyRegisteredError(RuntimeError): Line 59: pass
Line 271: return hash(type(self)) ^ hash(self._mount) Line 272: Line 273: Line 274: class GlusterFSConnection(object): Line 275: log = logging.getLogger("Storage.StorageServer.GlusterFSConnection") This is too specific, Storage.StorageServer is enough. Line 276: Line 277: def __init__(self, spec, vfsType="glusterfs", options=""): Line 278: self._vfsType = vfsType Line 279: self._remotePath = spec
Line 279: self._remotePath = spec Line 280: self._options = options Line 281: Line 282: def connect(self): Line 283: remotePath = self._getGlusterServerAndVolume() remotePath does not make sense for keeping getGlusterServerAndVolume result. Line 284: primaryServer = remotePath[0] Line 285: volume = remotePath[1] Line 286: self.log.debug("Gluster primary sever: %s Volume: %s", Line 287: primaryServer,
Line 280: self._options = options Line 281: Line 282: def connect(self): Line 283: remotePath = self._getGlusterServerAndVolume() Line 284: primaryServer = remotePath[0] You want something like:
primary_server, volume = self._get_volume_info() Line 285: volume = remotePath[1] Line 286: self.log.debug("Gluster primary sever: %s Volume: %s", Line 287: primaryServer, Line 288: volume)
Line 335: servers = [] Line 336: try: Line 337: root = ET.fromstring('\n'.join(xml)) Line 338: except _etreeExceptions: Line 339: root = ET.fromstring(xml) Why do we have to parse twice? Line 340: Line 341: for brick in root.iter('brick'): Line 342: servers.append(brick.text.split(":/")[0]) Line 343: servers.remove(primaryServer)
Line 347: def _getGlusterBackupServersOption(self, servers): Line 348: optionsString = "backup-volfile-servers=%s:%s" % (servers[0], Line 349: servers[1]) Line 350: Line 351: return optionsString Move here all the code generating the option string Line 352: Line 353: def _getGlusterVolCmd(self, server, volume, options=None): Line 354: command = [_glusterCommandPath.cmd, "--mode=script", "volume"] Line 355: command += ["info"]
Line 351: return optionsString Line 352: Line 353: def _getGlusterVolCmd(self, server, volume, options=None): Line 354: command = [_glusterCommandPath.cmd, "--mode=script", "volume"] Line 355: command += ["info"] Use command.append()/extend() instead of += Line 356: Line 357: if volume: Line 358: command.append(volume) Line 359:
Line 362: Line 363: if options: Line 364: command.append(options) Line 365: Line 366: self.log.debug("Gluster volume info command: %s", command) Not needed, we log executed commands in execCmd Line 367: return command Line 368: Line 369: def _execGlusterXml(self, cmd): Line 370: rc, out, err = utils.execCmd(cmd)
Line 366: self.log.debug("Gluster volume info command: %s", command) Line 367: return command Line 368: Line 369: def _execGlusterXml(self, cmd): Line 370: rc, out, err = utils.execCmd(cmd) Use raw=True to get gluster output as is, without splitting to lines. Line 371: if rc != 0: Line 372: raise se.GlusterCmdExecFailedException(rc, out, err) Line 373: try: Line 374: tree = ET.fromstring('\n'.join(out))
Line 368: Line 369: def _execGlusterXml(self, cmd): Line 370: rc, out, err = utils.execCmd(cmd) Line 371: if rc != 0: Line 372: raise se.GlusterCmdExecFailedException(rc, out, err) Too long - use GlusterCommandError Line 373: try: Line 374: tree = ET.fromstring('\n'.join(out)) Line 375: rv = int(tree.find('opRet').text) Line 376: msg = tree.find('opErrstr').text
Line 370: rc, out, err = utils.execCmd(cmd) Line 371: if rc != 0: Line 372: raise se.GlusterCmdExecFailedException(rc, out, err) Line 373: try: Line 374: tree = ET.fromstring('\n'.join(out)) No need to join if you used raw=True in execCmd Line 375: rv = int(tree.find('opRet').text) Line 376: msg = tree.find('opErrstr').text Line 377: errNo = int(tree.find('opErrno').text) Line 378: except _etreeExceptions:
Line 373: try: Line 374: tree = ET.fromstring('\n'.join(out)) Line 375: rv = int(tree.find('opRet').text) Line 376: msg = tree.find('opErrstr').text Line 377: errNo = int(tree.find('opErrno').text) rv and rc are confusing - use the terms used in the gluster message - ret, errstr, errno Line 378: except _etreeExceptions: Line 379: raise se.GlusterXmlErrorException(err=out) Line 380: if rv == 0: Line 381: return out
Line 375: rv = int(tree.find('opRet').text) Line 376: msg = tree.find('opErrstr').text Line 377: errNo = int(tree.find('opErrno').text) Line 378: except _etreeExceptions: Line 379: raise se.GlusterXmlErrorException(err=out) You are hiding etree error message.
And the error name is too long and contains both "Error" and "Exception" - use only "Error". Line 380: if rv == 0: Line 381: return out Line 382: else: Line 383: if errNo != 0:
Line 381: return out Line 382: else: Line 383: if errNo != 0: Line 384: rv = errNo Line 385: raise se.GlusterCmdFailedException(rc=rv, err=[msg]) First handle errors and raise. The normal flow should be simple:
if error: raise ...
return out Line 386: Line 387: def _getGlusterVolumeInfo(self, server, volume, options=None): Line 388: command = self._getGlusterVolCmd(server, volume, options) Line 389: return self._execGlusterXml(command)
Line 385: raise se.GlusterCmdFailedException(rc=rv, err=[msg]) Line 386: Line 387: def _getGlusterVolumeInfo(self, server, volume, options=None): Line 388: command = self._getGlusterVolCmd(server, volume, options) Line 389: return self._execGlusterXml(command) Please inline self._execGlusterXml here. Line 390: Line 391: def __eq__(self, other): Line 392: if not isinstance(other, GlusterFSConnection): Line 393: return False
Nir Soffer has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 1:
(3 comments)
https://gerrit.ovirt.org/#/c/40665/1/tests/glusterStorageServerTests.py File tests/glusterStorageServerTests.py:
Line 5: class GlusterFSConnectionTests(VdsmTestCase): Line 6: Line 7: def testParsingGlusterPath(self): Line 8: gluster = GlusterFSConnection("10.20.30.40:/my_lovely_vol", "") Line 9: remotePath = gluster._getGlusterServerAndVolume() Don't test private methods, this makes it harder to modify the object. Line 10: self.assertEquals(remotePath[0], "10.20.30.40") Line 11: self.assertEquals(remotePath[1], "my_lovely_vol") Line 12: Line 13: def testPreparingGlusterVolumeInfoCmd(self):
Line 11: self.assertEquals(remotePath[1], "my_lovely_vol") Line 12: Line 13: def testPreparingGlusterVolumeInfoCmd(self): Line 14: gluster = GlusterFSConnection("10.20.30.40:/my_lovely_vol", "") Line 15: command = gluster._getGlusterVolCmd("10.20.30.40", "my_lovely_vol", "--xml") Don't test private methods, instead mock execCmd and check if gluster was run correctly. Line 16: self.assertEquals(command, Line 17: ['/usr/sbin/gluster', Line 18: '--mode=script', Line 19: 'volume',
Line 30: gluster = GlusterFSConnection("10.20.30.40:/my_lovely_vol", "") Line 31: servers = gluster._getGlusterBackupServersFromVolInfoXml(xml, "10.35.160.202") Line 32: self.assertEquals(servers, ["10.35.160.6", "10.35.160.203"]) Line 33: Line 34: backupOpts = gluster._getGlusterBackupServersOption(servers) Don't test private methods
Nir Soffer has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 1:
(4 comments)
https://gerrit.ovirt.org/#/c/40665/1//COMMIT_MSG Commit Message:
Line 9: Currently, engine supports mounting single gluster server. With this change, Line 10: mount will include the other two gluster servers defined in the replica. To do Line 11: so, vdsm uses gluster get info api in order to get the IPs of the other two Line 12: servers and then, builds mount command using gluster 'backup-volfile-servers' Line 13: option. Please show the options that the users had to write manually before, and the options now written by vdsm. Line 14: Line 15: Change-Id: I2478a5edc1fc9d24eb96d64a32a98a2467ce2989 Line 16: Bug-Url: https://bugzilla.redhat.com/1177777
https://gerrit.ovirt.org/#/c/40665/1/vdsm/storage/storage_exception.py File vdsm/storage/storage_exception.py:
Line 1515: Line 1516: Line 1517: class GlusterCmdExecFailedException(StorageException): Line 1518: code = 615 Line 1519: message = "Command execution failed" What command? Line 1520: Line 1521: Line 1522: class GlusterXmlErrorException(StorageException): Line 1523: code = 616
Line 1520: Line 1521: Line 1522: class GlusterXmlErrorException(StorageException): Line 1523: code = 616 Line 1524: message = "XML error" Very unclear message. Line 1525: Line 1526: Line 1527: class GlusterCmdFailedException(StorageException): Line 1528: code = 617
Line 1525: Line 1526: Line 1527: class GlusterCmdFailedException(StorageException): Line 1528: code = 617 Line 1529: message = "Command failed" What is the difference between GlusterCmdExecFailedException and GlusterCmdFailedException? Line 1530: Line 1531: Line 1532: ################################################# Line 1533: # SPM/HSM Exceptions
Nir Soffer has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 1:
(1 comment)
https://gerrit.ovirt.org/#/c/40665/1/vdsm/storage/storageServer.py File vdsm/storage/storageServer.py:
Line 258 Line 259 Line 260 Line 261 Line 262 Most of the code here is not related to gluster connection, and is about getting gluster volume info. Please move the code related to getting this info elsewhere. This will also avoid the need to test all those private methods, since they will be public methods of the object getting the info.
This class should use some gluster volume info function to get the info. Did you look in vdsm/storage/glusterVolume.py? isn't getVolumeInfo what you need?
We should see here very simple flow:
def connect(): volinfo = gluster.get_volume_info() build mount options using volinfo.backup_servers create mount connection connect
For testing we can mock the get_volume_info to return what we like, and verify that we run the mount (also mocked) with the correct options.
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 2:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 3:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 4:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
Nir Soffer has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 4:
(10 comments)
https://gerrit.ovirt.org/#/c/40665/4/tests/glusterStorageServerTests.py File tests/glusterStorageServerTests.py:
Line 41: 'volumeType': 'REPLICATE'}} Line 42: Line 43: @MonkeyPatch(storageServer, 'supervdsm', Supervdsm()) Line 44: def testGetVolumeInfo(self): Line 45: def glusterVolumeInfo(volumeName=None, remoteServer=None): It would be nice to assert that this volumeName and remoteServer have the correct value. Line 46: return self.volumeInfo Line 47: Line 48: storageServer.supervdsm.glusterVolumeInfo = glusterVolumeInfo Line 49:
Line 47: Line 48: storageServer.supervdsm.glusterVolumeInfo = glusterVolumeInfo Line 49: Line 50: gluster = GlusterFSConnection("192.168.122.1:/music", "") Line 51: self.assertEquals(gluster._getGlusterMountOptions(), Lets not check private functions. Instead, mock the mount command and check that connect() is sending the correct options.
https://gerrit.ovirt.org/#/c/40665/4/vdsm/storage/storageServer.py File vdsm/storage/storageServer.py:
Line 31 Line 32 Line 33 Line 34 Line 35 Add blank line before the supervdsm import. It belongs to the next group, not to vdsm group.
Line 251: DIR = "glusterSD" Line 252: Line 253: def connect(self): Line 254: glusterMountOptions = self._getGlusterMountOptions() Line 255: self._options += glusterMountOptions This code means that each time you call connect, gluster mount options are appended to self._options.
You want to check the options and add them to self._options only once.
This can be done by initializing in __init__, but we don't want to do stuff in __init__, this leads to bad things and objects that are hard to test.
So we can do this using lazy initialization:
@property def options(self): if self._gluster_options is None: self._gluster_options = self._get_gluster_mount_options() return self._options + self._gluster_options
MountConnection should access self.options instead of self._options.
Since you are using a property, this is now a readonly value, and other code cannot change it.
Having readonly public property, annoying reviewers will not complain about testing private methods ;-) Line 256: super(GlusterFSConnection, self).connect() Line 257: Line 258: def _getGlusterMountOptions(self): Line 259: remotePath = self._remotePath.split(":/")
Line 255: self._options += glusterMountOptions Line 256: super(GlusterFSConnection, self).connect() Line 257: Line 258: def _getGlusterMountOptions(self): Line 259: remotePath = self._remotePath.split(":/") Are you sure that all volumes are using ":/"? Please see the discussion in https://gerrit.ovirt.org/36237 about this. Line 260: glusterServer = remotePath[0] Line 261: volume = remotePath[1] Line 262: volInfo = supervdsm.getProxy().glusterVolumeInfo(volume, Line 263: glusterServer)
Line 257: Line 258: def _getGlusterMountOptions(self): Line 259: remotePath = self._remotePath.split(":/") Line 260: glusterServer = remotePath[0] Line 261: volume = remotePath[1] The Python way to do this split is to use tuple unpacking:
server, volume = remotePath.split(":/", 1)
Note that I'm splitting once, in case volume name contains also ":/" - maybe this is impossible, but there is no reason to split more than once anyway. Line 262: volInfo = supervdsm.getProxy().glusterVolumeInfo(volume, Line 263: glusterServer) Line 264: servers = self._getGlusterServers(volume, volInfo) Line 265: servers.remove(glusterServer)
Line 262: volInfo = supervdsm.getProxy().glusterVolumeInfo(volume, Line 263: glusterServer) Line 264: servers = self._getGlusterServers(volume, volInfo) Line 265: servers.remove(glusterServer) Line 266: return "backup-volfile-servers=%s:%s" % (servers[0], servers[1]) This assume replica 3 - we want to support any replica 1, 3 and maybe other.
The correct way would be:
return "backup-volfile-servers=" + ":".join(servers)
We also want to reject replica 2, but lets do this in another patch. Line 267: Line 268: def _getGlusterServers(self, volume, volInfo): Line 269: servers = [] Line 270: bricks = volInfo[volume]['bricks']
Line 264: servers = self._getGlusterServers(volume, volInfo) Line 265: servers.remove(glusterServer) Line 266: return "backup-volfile-servers=%s:%s" % (servers[0], servers[1]) Line 267: Line 268: def _getGlusterServers(self, volume, volInfo): The parameter volume is not very clear - is this a volume object or volume name? better use more descriptive name. Line 269: servers = [] Line 270: bricks = volInfo[volume]['bricks'] Line 271: for brick in bricks: Line 272: server = brick.split(":/")[0]
Line 268: def _getGlusterServers(self, volume, volInfo): Line 269: servers = [] Line 270: bricks = volInfo[volume]['bricks'] Line 271: for brick in bricks: Line 272: server = brick.split(":/")[0] Check also here that volumes and servers are always separated by ":/" - since we could care less about the volume name in this code, why not split by ":"? Line 273: servers.append(server) Line 274: Line 275: return servers Line 276:
Line 271: for brick in bricks: Line 272: server = brick.split(":/")[0] Line 273: servers.append(server) Line 274: Line 275: return servers The pythonic way to do this is:
return [brick.split(":/")[0] for brick in volInfo[volume]['bricks']]
And we don't need a function for this, you can simply inline this in the caller:
servers = [brick.split(":/")[0] for brick in volInfo[volume]['bricks']] Line 276: Line 277: Line 278: class NFSConnection(object): Line 279: DEFAULT_OPTIONS = ["soft", "nosharecache"]
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 5:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
Nir Soffer has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 5: Code-Review-1
Please address my comments in version 4.
Nir Soffer has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 5:
(5 comments)
https://gerrit.ovirt.org/#/c/40665/5/tests/glusterStorageServerTests.py File tests/glusterStorageServerTests.py:
Line 17: class FakeMount(object): Line 18: Line 19: def __init__(self, spec, localPath): Line 20: self._remotePath = spec Line 21: self._localPath = localPath Use public names (self.remotePath etc.), we want to peek into these values Line 22: self.fs_file = "" Line 23: Line 24: def isMounted(self): Line 25: return False
Line 21: self._localPath = localPath Line 22: self.fs_file = "" Line 23: Line 24: def isMounted(self): Line 25: return False Make it return self.mounted - so we can test the behavior when remounting. Line 26: Line 27: def mount(self, options, vfsType): Line 28: self._options = options Line 29: self._vfsType = vfsType
Line 28: self._options = options Line 29: self._vfsType = vfsType Line 30: Line 31: def getRecord(self): Line 32: return self Simple and nice! Line 33: Line 34: Line 35: class GlusterFSConnectionTests(VdsmTestCase): Line 36:
Line 77: gluster = GlusterFSConnection(spec="192.168.122.1:/music", Line 78: mountClass=FakeMount) Line 79: try: Line 80: gluster.connect() Line 81: self.assertEquals(gluster._mount._options, self.MOUNT_OPTIONS) You don't need to connect to check the mount options - please see my comment about this in version 4.
I think what we need is:
- Simple test that requires no mocking, checking that we create the correct mount options - no need to call connect. - Test that gluster connection create the correct mountpoint directory - Test that mount is created with the correct __init__ arguments - Test that mount.mount is called with the correct arguments Line 82: finally:
Line 79: try: Line 80: gluster.connect() Line 81: self.assertEquals(gluster._mount._options, self.MOUNT_OPTIONS) Line 82: finally: Line 83: fileUtils.cleanupdir(gluster._getLocalPath()) When we use a fake, we do *not* want to create directories in the real location. We need to create a temporary directory (see testlib.namedTemporaryDir), which will clean up automatically without writing an explicit try-finally block.
You want something like:
with testlib.namedTemporaryDir() as tmpdir: with monkeyPatchScope([(GlusterfsConnection, "localPathBase", tmpdir)]): gluster.connect() # Check that connect did the right thing
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 6:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 7:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
Ala Hino has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 5:
(5 comments)
https://gerrit.ovirt.org/#/c/40665/5/tests/glusterStorageServerTests.py File tests/glusterStorageServerTests.py:
Line 17: class FakeMount(object): Line 18: Line 19: def __init__(self, spec, localPath): Line 20: self._remotePath = spec Line 21: self._localPath = localPath
Use public names (self.remotePath etc.), we want to peek into these values
Done Line 22: self.fs_file = "" Line 23: Line 24: def isMounted(self): Line 25: return False
Line 21: self._localPath = localPath Line 22: self.fs_file = "" Line 23: Line 24: def isMounted(self): Line 25: return False
Make it return self.mounted - so we can test the behavior when remounting.
Done Line 26: Line 27: def mount(self, options, vfsType): Line 28: self._options = options Line 29: self._vfsType = vfsType
Line 28: self._options = options Line 29: self._vfsType = vfsType Line 30: Line 31: def getRecord(self): Line 32: return self
Simple and nice!
Done Line 33: Line 34: Line 35: class GlusterFSConnectionTests(VdsmTestCase): Line 36:
Line 77: gluster = GlusterFSConnection(spec="192.168.122.1:/music", Line 78: mountClass=FakeMount) Line 79: try: Line 80: gluster.connect() Line 81: self.assertEquals(gluster._mount._options, self.MOUNT_OPTIONS)
You don't need to connect to check the mount options - please see my commen
Updated unit test to test gluster mount options. Other tests to be addressed in separate patches. Line 82: finally:
Line 79: try: Line 80: gluster.connect() Line 81: self.assertEquals(gluster._mount._options, self.MOUNT_OPTIONS) Line 82: finally: Line 83: fileUtils.cleanupdir(gluster._getLocalPath())
When we use a fake, we do *not* want to create directories in the real loca
This will be relevant when adding the other tests mentioned in previous comment.
Ala Hino has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 4:
(8 comments)
https://gerrit.ovirt.org/#/c/40665/4/vdsm/storage/storageServer.py File vdsm/storage/storageServer.py:
Line 31 Line 32 Line 33 Line 34 Line 35
Add blank line before the supervdsm import. It belongs to the next group, n
Done
Line 251: DIR = "glusterSD" Line 252: Line 253: def connect(self): Line 254: glusterMountOptions = self._getGlusterMountOptions() Line 255: self._options += glusterMountOptions
This code means that each time you call connect, gluster mount options are
Done Line 256: super(GlusterFSConnection, self).connect() Line 257: Line 258: def _getGlusterMountOptions(self): Line 259: remotePath = self._remotePath.split(":/")
Line 255: self._options += glusterMountOptions Line 256: super(GlusterFSConnection, self).connect() Line 257: Line 258: def _getGlusterMountOptions(self): Line 259: remotePath = self._remotePath.split(":/")
Are you sure that all volumes are using ":/"? Plese see the discussion in h
Done according to the discussion. Line 260: glusterServer = remotePath[0] Line 261: volume = remotePath[1] Line 262: volInfo = supervdsm.getProxy().glusterVolumeInfo(volume, Line 263: glusterServer)
Line 257: Line 258: def _getGlusterMountOptions(self): Line 259: remotePath = self._remotePath.split(":/") Line 260: glusterServer = remotePath[0] Line 261: volume = remotePath[1]
The Python way to do this split is to use tuple unpacking:
Done Line 262: volInfo = supervdsm.getProxy().glusterVolumeInfo(volume, Line 263: glusterServer) Line 264: servers = self._getGlusterServers(volume, volInfo) Line 265: servers.remove(glusterServer)
Line 262: volInfo = supervdsm.getProxy().glusterVolumeInfo(volume, Line 263: glusterServer) Line 264: servers = self._getGlusterServers(volume, volInfo) Line 265: servers.remove(glusterServer) Line 266: return "backup-volfile-servers=%s:%s" % (servers[0], servers[1])
This assume replica 3 - we want to support any replica 1, 3 and maybe other
Done Line 267: Line 268: def _getGlusterServers(self, volume, volInfo): Line 269: servers = [] Line 270: bricks = volInfo[volume]['bricks']
Line 264: servers = self._getGlusterServers(volume, volInfo) Line 265: servers.remove(glusterServer) Line 266: return "backup-volfile-servers=%s:%s" % (servers[0], servers[1]) Line 267: Line 268: def _getGlusterServers(self, volume, volInfo):
The parameter volume is not very clear - is this a volume object or volume
Done Line 269: servers = [] Line 270: bricks = volInfo[volume]['bricks'] Line 271: for brick in bricks: Line 272: server = brick.split(":/")[0]
Line 268: def _getGlusterServers(self, volume, volInfo): Line 269: servers = [] Line 270: bricks = volInfo[volume]['bricks'] Line 271: for brick in bricks: Line 272: server = brick.split(":/")[0]
Check also here that volumes and servers are always separated by ":/" - sin
Done Line 273: servers.append(server) Line 274: Line 275: return servers Line 276:
Line 271: for brick in bricks: Line 272: server = brick.split(":/")[0] Line 273: servers.append(server) Line 274: Line 275: return servers
The pythonic way to do this is:
Done Line 276: Line 277: Line 278: class NFSConnection(object): Line 279: DEFAULT_OPTIONS = ["soft", "nosharecache"]
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 8:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 9:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
Nir Soffer has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 9:
(8 comments)
https://gerrit.ovirt.org/#/c/40665/9/tests/glusterStorageServerTests.py File tests/glusterStorageServerTests.py:
Line 5: Line 6: import supervdsm Line 7: Line 8: Line 9: class Supervdsm(object): Rename to FakeSupervdsm - make it more clear what is this strange empty class. Line 10: Line 11: def getProxy(self): Line 12: return self Line 13:
Line 13: Line 14: Line 15: class GlusterFSConnectionTests(VdsmTestCase): Line 16: Line 17: MOUNT_OPTIONS = "backup-volfile-servers=192.168.122.2:192.168.122.3" To prevent confusion with the second test, rename this to "computed_options", and move it into the test. Line 18: Line 19: VOLUME_INFO = \ Line 20: {'music': {'brickCount': '3', Line 21: 'bricks': ['192.168.122.1:/tmp/music',
Line 15: class GlusterFSConnectionTests(VdsmTestCase): Line 16: Line 17: MOUNT_OPTIONS = "backup-volfile-servers=192.168.122.2:192.168.122.3" Line 18: Line 19: VOLUME_INFO = \ No need for a class constant, since this is used by only one test. Just return this from the fake getVolumeInfo function in the test. Unless you plan to add more tests using same response, In which case, getVolumeInfo can be a function in the module.
def getVolumeInfo(volname, remoteserver): return { ... }
class Test(...):
@MonkeyPatch(storageServer, 'supervdsm', Supervdsm()) def test_foo(...): storageServer.supervdsm.glusterVolumeInfo = glusterVolumeInfo ...
@MonkeyPatch(storageServer, 'supervdsm', Supervdsm()) def test_bar(...): storageServer.supervdsm.glusterVolumeInfo = glusterVolumeInfo ... Line 20: {'music': {'brickCount': '3', Line 21: 'bricks': ['192.168.122.1:/tmp/music', Line 22: '192.168.122.2:/tmp/music', Line 23: '192.168.122.3:/tmp/music'],
Line 44: 'volumeType': 'REPLICATE'}} Line 45: Line 46: @MonkeyPatch(storageServer, 'supervdsm', Supervdsm()) Line 47: def testGetVolumeInfo(self): Line 48: def glusterVolumeInfo(volumeName=None, remoteServer=None): Please assert that this function is called with the correct arguments. Would be nice to test this in a separate test. Line 49: return self.VOLUME_INFO Line 50: Line 51: storageServer.supervdsm.glusterVolumeInfo = glusterVolumeInfo Line 52:
Line 53: gluster = GlusterFSConnection(spec="192.168.122.1:/music") Line 54: self.assertEquals(gluster.options, self.MOUNT_OPTIONS) Line 55: Line 56: def testProvidedGlusterVolumeInfo(self): Line 57: MOUNT_OPTIONS = "backup-volfile-servers=server1:server2" This name is confusing - use "user_options" instead. No need to make this a constant in the context of a test method. If Python had a nice way to do this (e.g. const, final), why not, but having random CONSTANTS in the source makes it harder to read. Line 58: gluster = GlusterFSConnection(spec="192.168.122.1:/music", Line 59: options=MOUNT_OPTIONS)
https://gerrit.ovirt.org/#/c/40665/9/vdsm/storage/storageServer.py File vdsm/storage/storageServer.py:
Line 192: @classmethod Line 193: def getLocalPathBase(cls): Line 194: return cls.localPathBase Line 195: Line 196: def __init__(self, spec, vfsType=None, options="", mountClass=mount.Mount): This change is not needed for this patch, and this patch is not a refactoring. Please separate this to another patch, possible with the new FakeMount class and connect tests. Line 197: self._vfsType = vfsType Line 198: self._remotePath = spec Line 199: self._options = options Line 200: self._mount = mountClass(spec, self._getLocalPath())
Line 261: @property Line 262: def options(self): Line 263: if self._gluster_options is None: Line 264: self._gluster_options = self._get_gluster_mount_options() Line 265: return self._options + self._gluster_options Please put @properties after __init__. __init__ is the only place where you can understand what the instance variables of this class are. We want to have __init__ always first after class constants and variables.
Do not follow the bad examples in this file (MountConnection). Line 266: Line 267: def __init__(self, spec, vfsType=None, options="", mountClass=mount.Mount): Line 268: super(GlusterFSConnection, self).__init__(spec, Line 269: vfsType,
Line 272: self._gluster_options = None Line 273: Line 274: def _get_gluster_mount_options(self): Line 275: if "backup-volfile-servers" in self._options: Line 276: return "" This means that we let the user override the optimal settings.
I don't think we want to support this. If we do, we still need to check gluster options, and warn if the settings the user chose are not optimal, or fail if they are invalid.
- Non-optimal settings: include only 1 server in backup-volfile-servers - Invalid settings: include a non-existing brick in backup-volfile-servers.
The minimal change is to log the user preferred settings and warn that we are using user specified settings.
Please check how we behave when a user specifies NFS or posix options that we also handle - I think we either fail the mount, or the mount will fail since you specified multiple options. Line 277: Line 278: volfileServer, volname = self._remotePath.split(":", 1) Line 279: volname = volname.strip('/') Line 280: volInfo = supervdsm.getProxy().glusterVolumeInfo(volname,
Nir Soffer has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 9:
(1 comment)
https://gerrit.ovirt.org/#/c/40665/9/tests/glusterStorageServerTests.py File tests/glusterStorageServerTests.py:
Please move the tests to storageServerTests.py. The rule is one test module per module. Line 1: from monkeypatch import MonkeyPatch, MonkeyPatchScope Line 2: from testlib import VdsmTestCase Line 3: from storage.storageServer import GlusterFSConnection Line 4: from storage import storageServer
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 10:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
Ala Hino has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 9:
(9 comments)
https://gerrit.ovirt.org/#/c/40665/9/tests/glusterStorageServerTests.py File tests/glusterStorageServerTests.py:
Please mote the tests to storageServerTests.py. The rule is one test module
Done Line 1: from monkeypatch import MonkeyPatch, MonkeyPatchScope Line 2: from testlib import VdsmTestCase Line 3: from storage.storageServer import GlusterFSConnection Line 4: from storage import storageServer
Line 5: Line 6: import supervdsm Line 7: Line 8: Line 9: class Supervdsm(object):
Rename to FakeSupervdsm - make it more clear what is this strange empty cla
Done Line 10: Line 11: def getProxy(self): Line 12: return self Line 13:
Line 13: Line 14: Line 15: class GlusterFSConnectionTests(VdsmTestCase): Line 16: Line 17: MOUNT_OPTIONS = "backup-volfile-servers=192.168.122.2:192.168.122.3"
To prevent confusion with the second test, rename this to "computed_options
Done Line 18: Line 19: VOLUME_INFO = \ Line 20: {'music': {'brickCount': '3', Line 21: 'bricks': ['192.168.122.1:/tmp/music',
Line 15: class GlusterFSConnectionTests(VdsmTestCase): Line 16: Line 17: MOUNT_OPTIONS = "backup-volfile-servers=192.168.122.2:192.168.122.3" Line 18: Line 19: VOLUME_INFO = \
No need for a class constant, since this is used by only one test. Just ret
Done Line 20: {'music': {'brickCount': '3', Line 21: 'bricks': ['192.168.122.1:/tmp/music', Line 22: '192.168.122.2:/tmp/music', Line 23: '192.168.122.3:/tmp/music'],
Line 44: 'volumeType': 'REPLICATE'}} Line 45: Line 46: @MonkeyPatch(storageServer, 'supervdsm', Supervdsm()) Line 47: def testGetVolumeInfo(self): Line 48: def glusterVolumeInfo(volumeName=None, remoteServer=None):
Please assert that this function is called with the correct arguments. Woul
Done Line 49: return self.VOLUME_INFO Line 50: Line 51: storageServer.supervdsm.glusterVolumeInfo = glusterVolumeInfo Line 52:
Line 53: gluster = GlusterFSConnection(spec="192.168.122.1:/music") Line 54: self.assertEquals(gluster.options, self.MOUNT_OPTIONS) Line 55: Line 56: def testProvidedGlusterVolumeInfo(self): Line 57: MOUNT_OPTIONS = "backup-volfile-servers=server1:server2"
This name is confusing - use "user_options" instead. No need to make this a
Done Line 58: gluster = GlusterFSConnection(spec="192.168.122.1:/music", Line 59: options=MOUNT_OPTIONS)
https://gerrit.ovirt.org/#/c/40665/9/vdsm/storage/storageServer.py File vdsm/storage/storageServer.py:
Line 192: @classmethod Line 193: def getLocalPathBase(cls): Line 194: return cls.localPathBase Line 195: Line 196: def __init__(self, spec, vfsType=None, options="", mountClass=mount.Mount):
This change is not needed for this patch, and this patch is not a refactori
Done Line 197: self._vfsType = vfsType Line 198: self._remotePath = spec Line 199: self._options = options Line 200: self._mount = mountClass(spec, self._getLocalPath())
Line 261: @property Line 262: def options(self): Line 263: if self._gluster_options is None: Line 264: self._gluster_options = self._get_gluster_mount_options() Line 265: return self._options + self._gluster_options
Please put @properties after __init__. __init__ is the only place were you
Done Line 266: Line 267: def __init__(self, spec, vfsType=None, options="", mountClass=mount.Mount): Line 268: super(GlusterFSConnection, self).__init__(spec, Line 269: vfsType,
Line 272: self._gluster_options = None Line 273: Line 274: def _get_gluster_mount_options(self): Line 275: if "backup-volfile-servers" in self._options: Line 276: return ""
This means that we let the user override the optimal settings.
Added warn message indicating that user specified mount options will be used. No need to log user setting as they appear in the mount cmd. Line 277: Line 278: volfileServer, volname = self._remotePath.split(":", 1) Line 279: volname = volname.strip('/') Line 280: volInfo = supervdsm.getProxy().glusterVolumeInfo(volname,
Nir Soffer has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 10: Code-Review+1
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 11:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 12:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 13:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 14:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 15:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
Nir Soffer has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 15:
(3 comments)
https://gerrit.ovirt.org/#/c/40665/15/tests/storageServerTests.py File tests/storageServerTests.py:
Line 72: self.assertEquals(mount_con._mount.fs_file, Line 73: "/tmp/glusterSD/server:_volume") Line 74: Line 75: @MonkeyPatch(storageServer, 'supervdsm', FakeSupervdsm()) Line 76: def testGlusterMountOptions(self): Use lower_case names Line 77: def glusterVolumeInfo(volname=None, volfileServer=None): Line 78: self.assertEqual(volname, "music") Line 79: self.assertEqual(volfileServer, "192.168.122.1") Line 80: return {'music': {'brickCount': '3',
Line 114: gluster = GlusterFSConnection(spec="192.168.122.1:/music") Line 115: self.assertEquals(gluster.options, Line 116: "backup-volfile-servers=192.168.122.2:192.168.122.3") Line 117: Line 118: def testUserProvidedGlusterMountOptions(self): Use lower_case names Line 119: user_options = "backup-volfile-servers=server1:server2" Line 120: gluster = GlusterFSConnection(spec="192.168.122.1:/music", Line 121: options=user_options)
https://gerrit.ovirt.org/#/c/40665/15/vdsm/storage/storageServer.py File vdsm/storage/storageServer.py:
Line 280: mountClass=mount.Mount): Line 281: super(GlusterFSConnection, self).__init__(spec, Line 282: vfsType, Line 283: options, Line 284: mountClass) Call kwargs properly:
vfsType=vfsType, options=options, mountClass=mountClass Line 285: self._gluster_options = None Line 286: Line 287: @property Line 288: def options(self):
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 16:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
Ala Hino has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 15:
(3 comments)
https://gerrit.ovirt.org/#/c/40665/15/tests/storageServerTests.py File tests/storageServerTests.py:
Line 72: self.assertEquals(mount_con._mount.fs_file, Line 73: "/tmp/glusterSD/server:_volume") Line 74: Line 75: @MonkeyPatch(storageServer, 'supervdsm', FakeSupervdsm()) Line 76: def testGlusterMountOptions(self):
Use lower_case names
Done Line 77: def glusterVolumeInfo(volname=None, volfileServer=None): Line 78: self.assertEqual(volname, "music") Line 79: self.assertEqual(volfileServer, "192.168.122.1") Line 80: return {'music': {'brickCount': '3',
Line 114: gluster = GlusterFSConnection(spec="192.168.122.1:/music") Line 115: self.assertEquals(gluster.options, Line 116: "backup-volfile-servers=192.168.122.2:192.168.122.3") Line 117: Line 118: def testUserProvidedGlusterMountOptions(self):
Use lower_case names
Done Line 119: user_options = "backup-volfile-servers=server1:server2" Line 120: gluster = GlusterFSConnection(spec="192.168.122.1:/music", Line 121: options=user_options)
https://gerrit.ovirt.org/#/c/40665/15/vdsm/storage/storageServer.py File vdsm/storage/storageServer.py:
Line 280: mountClass=mount.Mount): Line 281: super(GlusterFSConnection, self).__init__(spec, Line 282: vfsType, Line 283: options, Line 284: mountClass)
Call kwargs properly:
Done Line 285: self._gluster_options = None Line 286: Line 287: @property Line 288: def options(self):
Nir Soffer has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 16: Code-Review+1
Sahina Bose has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 16:
(1 comment)
https://gerrit.ovirt.org/#/c/40665/16/vdsm/storage/storageServer.py File vdsm/storage/storageServer.py:
Line 300: volInfo = supervdsm.getProxy().glusterVolumeInfo(volname, Line 301: volfileServer) Line 302: servers = [brick.split(":")[0] Line 303: for brick in volInfo[volname]['bricks']] Line 304: servers.remove(volfileServer) servers would be empty for a replica 1 volume. Also, for a distributed replica, the servers list would have duplicates? Line 305: return "backup-volfile-servers=" + ":".join(servers) Line 306: Line 307: Line 308: class NFSConnection(object):
Nir Soffer has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 16: -Code-Review
(2 comments)
Needs fix for replica 1 case.
https://gerrit.ovirt.org/#/c/40665/16/vdsm/storage/storageServer.py File vdsm/storage/storageServer.py:
Line 299: volname = volname.strip('/') Line 300: volInfo = supervdsm.getProxy().glusterVolumeInfo(volname, Line 301: volfileServer) Line 302: servers = [brick.split(":")[0] Line 303: for brick in volInfo[volname]['bricks']] Sahina, servers will contain here the single and only server - right? Line 304: servers.remove(volfileServer) Line 305: return "backup-volfile-servers=" + ":".join(servers) Line 306: Line 307:
Line 300: volInfo = supervdsm.getProxy().glusterVolumeInfo(volname, Line 301: volfileServer) Line 302: servers = [brick.split(":")[0] Line 303: for brick in volInfo[volname]['bricks']] Line 304: servers.remove(volfileServer)
servers would be empty for a replica 1 volume. Also, for a distributed repl
Right, we should do:
if not servers: return ""
return "backup-volfile-servers=" + ":".join(servers)
Ala, please add test for replica 1, verifying this logic. Line 305: return "backup-volfile-servers=" + ":".join(servers) Line 306: Line 307: Line 308: class NFSConnection(object):
Sahina Bose has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 16:
(1 comment)
https://gerrit.ovirt.org/#/c/40665/16/vdsm/storage/storageServer.py File vdsm/storage/storageServer.py:
Line 299: volname = volname.strip('/') Line 300: volInfo = supervdsm.getProxy().glusterVolumeInfo(volname, Line 301: volfileServer) Line 302: servers = [brick.split(":")[0] Line 303: for brick in volInfo[volname]['bricks']]
Sahina, servers will contain here the single and only server - right?
Yes, one brick = <server>:<mount point>. So for replica 1, it would contain 1 server, Line 304: servers.remove(volfileServer) Line 305: return "backup-volfile-servers=" + ":".join(servers) Line 306: Line 307:
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 17:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
Ala Hino has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 16:
(1 comment)
https://gerrit.ovirt.org/#/c/40665/16/vdsm/storage/storageServer.py File vdsm/storage/storageServer.py:
Line 300: volInfo = supervdsm.getProxy().glusterVolumeInfo(volname, Line 301: volfileServer) Line 302: servers = [brick.split(":")[0] Line 303: for brick in volInfo[volname]['bricks']] Line 304: servers.remove(volfileServer)
Right, we should do:
Done Line 305: return "backup-volfile-servers=" + ":".join(servers) Line 306: Line 307: Line 308: class NFSConnection(object):
Nir Soffer has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 17:
(1 comment)
https://gerrit.ovirt.org/#/c/40665/17//COMMIT_MSG Commit Message:
Line 11: replica. To do so, vdsm uses gluster get info api in order to get the Line 12: IPs of the other two servers and then, builds mount command using Line 13: gluster 'backup-volfile-servers' option. Line 14: Line 15: Note that today vdsm has soft-dependency gluster-fuse and with this gluster-fuse -> glusterfs-fuse cgange -> change gluster-cli -> glusterfs-cli Line 16: cgange, vdsm will have dependency on gluster-cli. If this dependency Line 17: doesn't exist, there is an error when creating gluster storage domain. Line 18: Line 19: Change-Id: I2478a5edc1fc9d24eb96d64a32a98a2467ce2989
Nir Soffer has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 17:
(2 comments)
https://gerrit.ovirt.org/#/c/40665/17/tests/storageServerTests.py File tests/storageServerTests.py:
Line 85: storageServer.supervdsm.glusterVolumeInfo = glusterVolumeInfo Line 86: Line 87: gluster = GlusterFSConnection(spec="192.168.122.1:/music") Line 88: self.assertEquals(gluster.options, Line 89: "backup-volfile-servers=192.168.122.2:192.168.122.3") These tests are nice, but we don't see the original user options.
This may actually hide a bug, since mount options are usually separate by "," but you are just combining self.options + self._gluster_options.
You should test two cases:
- user options empty + gluster options - user options non-empty + gluster options
These tests seems ok for the first option, lets add a test for non-empty user options. Line 90: Line 91: @MonkeyPatch(storageServer, 'supervdsm', FakeSupervdsm()) Line 92: def test_gluster_replica1_mount_options(self): Line 93: def glusterVolumeInfo(volname=None, volfileServer=None):
https://gerrit.ovirt.org/#/c/40665/17/vdsm/storage/storageServer.py File vdsm/storage/storageServer.py:
Line 302: servers = [brick.split(":")[0] Line 303: for brick in volInfo[volname]['bricks']] Line 304: servers.remove(volfileServer) Line 305: if not servers: Line 306: return "" Please add empty line when you break the normal flow - same way you handle user options above. Line 307: return "backup-volfile-servers=" + ":".join(servers) Line 308: Line 309: Line 310: class NFSConnection(object):
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 18:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
Ala Hino has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 17:
(3 comments)
https://gerrit.ovirt.org/#/c/40665/17//COMMIT_MSG Commit Message:
Line 11: replica. To do so, vdsm uses gluster get info api in order to get the Line 12: IPs of the other two servers and then, builds mount command using Line 13: gluster 'backup-volfile-servers' option. Line 14: Line 15: Note that today vdsm has soft-dependency gluster-fuse and with this
gluster-fuse -> glusterfs-fuse
Done Line 16: cgange, vdsm will have dependency on gluster-cli. If this dependency Line 17: doesn't exist, there is an error when creating gluster storage domain. Line 18: Line 19: Change-Id: I2478a5edc1fc9d24eb96d64a32a98a2467ce2989
https://gerrit.ovirt.org/#/c/40665/17/tests/storageServerTests.py File tests/storageServerTests.py:
Line 85: storageServer.supervdsm.glusterVolumeInfo = glusterVolumeInfo Line 86: Line 87: gluster = GlusterFSConnection(spec="192.168.122.1:/music") Line 88: self.assertEquals(gluster.options, Line 89: "backup-volfile-servers=192.168.122.2:192.168.122.3")
These tests are nice, but we don't see the original user options.
Done Line 90: Line 91: @MonkeyPatch(storageServer, 'supervdsm', FakeSupervdsm()) Line 92: def test_gluster_replica1_mount_options(self): Line 93: def glusterVolumeInfo(volname=None, volfileServer=None):
https://gerrit.ovirt.org/#/c/40665/17/vdsm/storage/storageServer.py File vdsm/storage/storageServer.py:
Line 302: servers = [brick.split(":")[0] Line 303: for brick in volInfo[volname]['bricks']] Line 304: servers.remove(volfileServer) Line 305: if not servers: Line 306: return ""
Please add empty line when you break the normal flow - same way you handle
Done Line 307: return "backup-volfile-servers=" + ":".join(servers) Line 308: Line 309: Line 310: class NFSConnection(object):
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 19:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
Darshan N has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 19:
(1 comment)
https://gerrit.ovirt.org/#/c/40665/19/vdsm/storage/storageServer.py File vdsm/storage/storageServer.py:
Line 303: volInfo = supervdsm.getProxy().glusterVolumeInfo(volname, This call might raise some exception. can it be handled suitably?
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 20:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
Ala Hino has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 19:
(1 comment)
https://gerrit.ovirt.org/#/c/40665/19/vdsm/storage/storageServer.py File vdsm/storage/storageServer.py:
Line 303: volInfo = supervdsm.getProxy().glusterVolumeInfo(volname,
This call might raise some exception. can it be handled suitably?
What's wrong with raising original exception (GlusterVolumeInfoFailedException) raised by glusterUtils.py? There is no additional info that can be added to the exception here.
Sandro Bonazzola has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 20:
(2 comments)
https://gerrit.ovirt.org/#/c/40665/20//COMMIT_MSG Commit Message:
Line 13: gluster 'backup-volfile-servers' option. Line 14: Line 15: Note that today vdsm has soft-dependency glusterfs-fuse and with this Line 16: change, vdsm will have dependency on glusterfs-cli. If this dependency Line 17: doesn't exist, there is an error when creating gluster storage domain. Can you add an example of how the code change when using vdscli in order to use the new mount? Line 18: Line 19: Change-Id: I2478a5edc1fc9d24eb96d64a32a98a2467ce2989 Line 20: Bug-Url: https://bugzilla.redhat.com/1177777
https://gerrit.ovirt.org/#/c/40665/20/vdsm/storage/storageServer.py File vdsm/storage/storageServer.py:
Line 1: # 2012-2015 Line 2: # Copyright 2012 Red Hat, Inc. Line 3: # Line 4: # This program is free software; you can redistribute it and/or modify Line 5: # it under the terms of the GNU General Public License as published by
Nir Soffer has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 19:
(1 comment)
https://gerrit.ovirt.org/#/c/40665/19/vdsm/storage/storageServer.py File vdsm/storage/storageServer.py:
Line 299: return "" Line 300: Line 301: volfileServer, volname = self._remotePath.split(":", 1) Line 302: volname = volname.strip('/') Line 303: volInfo = supervdsm.getProxy().glusterVolumeInfo(volname,
This call might raise some exception. can it be handled suitably?
Ala, check how NFSConnection is handling such fatal failures.
If we have a suitable error in storage_exception.py we can log the original traceback and raise the storage_exception error, so the engine can show one of the translated error messages.
If we don't handle errors here, we will get a very clear traceback in vdsm log, and engine will show a generic error message. This may be a fine way to handle the error. Line 304: volfileServer) Line 305: servers = [brick.split(":")[0] Line 306: for brick in volInfo[volname]['bricks']] Line 307: servers.remove(volfileServer)
Nir Soffer has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 20:
(1 comment)
https://gerrit.ovirt.org/#/c/40665/20//COMMIT_MSG Commit Message:
Line 13: gluster 'backup-volfile-servers' option. Line 14: Line 15: Note that today vdsm has soft-dependency glusterfs-fuse and with this Line 16: change, vdsm will have dependency on glusterfs-cli. If this dependency Line 17: doesn't exist, there is an error when creating gluster storage domain.
Can you add an example of how the code change when using vdscli in order to
There is no change in the api, old call will work without any change.
If you were mounting before with the backup-volfile-servers=server1:server2 option, you don't have to do this anymore, since vdsm will get the server addresses from gluster, and add this mount option automatically.
Ala, please explain this in the commit message. Line 18: Line 19: Change-Id: I2478a5edc1fc9d24eb96d64a32a98a2467ce2989 Line 20: Bug-Url: https://bugzilla.redhat.com/1177777
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 21:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
Ala Hino has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 19:
(1 comment)
https://gerrit.ovirt.org/#/c/40665/19/vdsm/storage/storageServer.py File vdsm/storage/storageServer.py:
Line 299: return "" Line 300: Line 301: volfileServer, volname = self._remotePath.split(":", 1) Line 302: volname = volname.strip('/') Line 303: volInfo = supervdsm.getProxy().glusterVolumeInfo(volname,
Ala, check how NFSConnection is handling such fatal failures.
I don't see a suitable (gluster) exception in storage_exception.py. I will add a suitable exception and update the engine part in separate patches. Line 304: volfileServer) Line 305: servers = [brick.split(":")[0] Line 306: for brick in volInfo[volname]['bricks']] Line 307: servers.remove(volfileServer)
Ala Hino has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 20:
(2 comments)
https://gerrit.ovirt.org/#/c/40665/20//COMMIT_MSG Commit Message:
Line 13: gluster 'backup-volfile-servers' option. Line 14: Line 15: Note that today vdsm has soft-dependency glusterfs-fuse and with this Line 16: change, vdsm will have dependency on glusterfs-cli. If this dependency Line 17: doesn't exist, there is an error when creating gluster storage domain.
There is no change in the api, old call will work without any change.
Done Line 18: Line 19: Change-Id: I2478a5edc1fc9d24eb96d64a32a98a2467ce2989 Line 20: Bug-Url: https://bugzilla.redhat.com/1177777
https://gerrit.ovirt.org/#/c/40665/20/vdsm/storage/storageServer.py File vdsm/storage/storageServer.py:
Line 1: #
2012-2015
Done Line 2: # Copyright 2012 Red Hat, Inc. Line 3: # Line 4: # This program is free software; you can redistribute it and/or modify Line 5: # it under the terms of the GNU General Public License as published by
Nir Soffer has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 19:
(2 comments)
https://gerrit.ovirt.org/#/c/40665/19/vdsm/storage/storageServer.py File vdsm/storage/storageServer.py:
Line 290: self._gluster_options = self._get_gluster_mount_options() Line 291: comma = ',' Line 292: if self._options is "" or self._gluster_options is "": Line 293: comma = "" Line 294: return self._options + comma + self._gluster_options Please replace this with the code in the next patch - you don't introduce code in patch and fix it in the next. Line 295: Line 296: def _get_gluster_mount_options(self): Line 297: if "backup-volfile-servers" in self._options: Line 298: self.log.warn("User specified mount options will be used.")
Line 299: return "" Line 300: Line 301: volfileServer, volname = self._remotePath.split(":", 1) Line 302: volname = volname.strip('/') Line 303: volInfo = supervdsm.getProxy().glusterVolumeInfo(volname,
I don't see a suitable (gluster) exception in storage_exception.py.
Ok Line 304: volfileServer) Line 305: servers = [brick.split(":")[0] Line 306: for brick in volInfo[volname]['bricks']] Line 307: servers.remove(volfileServer)
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 22:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
Nir Soffer has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 22:
(1 comment)
https://gerrit.ovirt.org/#/c/40665/22/vdsm/storage/storageServer.py File vdsm/storage/storageServer.py:
Line 286: Line 287: @property Line 288: def options(self): Line 289: self._gluster_options = self._get_gluster_mount_options() Line 290: return ",".join(p for p in (self._options, self._gluster_options) if p) Why did you remove the lazy initialization here? it is valid only in the next patch, where you cache the volume info.
If you compute the value on each call, why do you need the _gluster_options variable?
Please keep the original code, and change only the way you join the arguments. Line 291: Line 292: def _get_gluster_mount_options(self): Line 293: if "backup-volfile-servers" in self._options: Line 294: self.log.warn("User specified mount options will be used.")
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 23:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
Nir Soffer has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 22:
(2 comments)
https://gerrit.ovirt.org/#/c/40665/22/vdsm/storage/storageServer.py File vdsm/storage/storageServer.py:
Line 288: def options(self): Line 289: self._gluster_options = self._get_gluster_mount_options() Line 290: return ",".join(p for p in (self._options, self._gluster_options) if p) Line 291: Line 292: def _get_gluster_mount_options(self): Lets be more specific - we do not compute here the gluster mount options, but the backup-volfile-servers option.
So lets rename this to _get_backup_servers_option() Line 293: if "backup-volfile-servers" in self._options: Line 294: self.log.warn("User specified mount options will be used.") Line 295: return "" Line 296:
Line 290: return ",".join(p for p in (self._options, self._gluster_options) if p) Line 291: Line 292: def _get_gluster_mount_options(self): Line 293: if "backup-volfile-servers" in self._options: Line 294: self.log.warn("User specified mount options will be used.") Lets be more specific about the warning:
"User specified backup-volfile-servers option will be used"
And please consider this shorter and more direct:
"Using user specified backup-volfile-servers option" Line 295: return "" Line 296: Line 297: volfileServer, volname = self._remotePath.split(":", 1) Line 298: volname = volname.strip('/')
Nir Soffer has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 23:
Please also rebase on top of Darshan latest patch.
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 24:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
Ala Hino has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 22:
(3 comments)
https://gerrit.ovirt.org/#/c/40665/22/vdsm/storage/storageServer.py File vdsm/storage/storageServer.py:
Line 286: Line 287: @property Line 288: def options(self): Line 289: self._gluster_options = self._get_gluster_mount_options() Line 290: return ",".join(p for p in (self._options, self._gluster_options) if p)
Why did you remove the lazy initialization here? it is valid only in the ne
Done Line 291: Line 292: def _get_gluster_mount_options(self): Line 293: if "backup-volfile-servers" in self._options: Line 294: self.log.warn("User specified mount options will be used.")
Line 288: def options(self): Line 289: self._gluster_options = self._get_gluster_mount_options() Line 290: return ",".join(p for p in (self._options, self._gluster_options) if p) Line 291: Line 292: def _get_gluster_mount_options(self):
Lets be more specific - we do not compute here the gluster mount options, b
Done Line 293: if "backup-volfile-servers" in self._options: Line 294: self.log.warn("User specified mount options will be used.") Line 295: return "" Line 296:
Line 290: return ",".join(p for p in (self._options, self._gluster_options) if p) Line 291: Line 292: def _get_gluster_mount_options(self): Line 293: if "backup-volfile-servers" in self._options: Line 294: self.log.warn("User specified mount options will be used.")
Lets be more specific about the warning:
Done Line 295: return "" Line 296: Line 297: volfileServer, volname = self._remotePath.split(":", 1) Line 298: volname = volname.strip('/')
Nir Soffer has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 24: Code-Review+1
Perfect, thanks!
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 25:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
Nir Soffer has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 25: Verified+1
Tested lightly on Fedora 21:
- Activate host with gluster storage domains - Put gluster storage domain to maintenance - Activate gluster storage domain
Tested together with https://gerrit.ovirt.org/41931
Freddy Rolland has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 25: Code-Review+1
Sandro Bonazzola has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 25: Code-Review+1
Ala Hino has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 25: Verified+1
Verified that the admin-provided backup-volfile-servers option is used in the mount command and not calculated by vdsm.
In addition, this use case is well covered by unit tests.
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 26:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 27:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
Adam Litke has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 27: Code-Review+2
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 28:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
Nir Soffer has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 28: -Verified
Ala, the patch you depend on no longer includes the glusterVolumeInfo code in vdsm - is it possible that an older vdsm-gluster package was installed when you verified this patch without the vdsm-gluster package?
Please verify again that after removing vdsm-gluster package, mounting gluster storage domain works.
Nir Soffer has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 28: Verified+1
I verified again installation without vdsm-gluster and it works.
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 29:
* Update tracker::#1177777::OK * Check Bug-Url::OK * Check Public Bug::#1177777::OK, public bug * Check Product::#1177777::OK, Correct product oVirt * Check TR::SKIP, not in a monitored branch (ovirt-3.5 ovirt-3.4 ovirt-3.3 ovirt-3.2) * Check merged to previous::IGNORE, Not in stable branch (['ovirt-3.5', 'ovirt-3.4', 'ovirt-3.3'])
Dan Kenigsberg has submitted this change and it was merged.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
gluster: Allow gluster mount with additional servers
Currently, engine supports mounting single gluster server. With this change, mount will include the other two gluster servers defined in the replica. To do so, vdsm uses gluster get info api in order to get the IPs of the other two servers and then, builds mount command using gluster 'backup-volfile-servers' option.
This patch does not change the existing api, it only saves the user the work needed to mount the other gluster servers. Before this patch, the user had to explicitly provide the 'backup-volfile-servers' option; with this patch, it is not required anymore.
Note that today vdsm has soft-dependency glusterfs-fuse and with this change, vdsm will have dependency on glusterfs-cli. If this dependency doesn't exist, there is an error when creating gluster storage domain.
Change-Id: I2478a5edc1fc9d24eb96d64a32a98a2467ce2989 Bug-Url: https://bugzilla.redhat.com/1177777 Signed-off-by: Ala Hino ahino@redhat.com Reviewed-on: https://gerrit.ovirt.org/40665 Reviewed-by: Nir Soffer nsoffer@redhat.com Reviewed-by: Jenkins CI Tested-by: Jenkins CI Reviewed-by: Freddy Rolland frolland@redhat.com Reviewed-by: Sandro Bonazzola sbonazzo@redhat.com Reviewed-by: Adam Litke alitke@redhat.com Tested-by: Nir Soffer nsoffer@redhat.com Continuous-Integration: Jenkins CI --- M tests/storageServerTests.py M vdsm/storage/storageServer.py 2 files changed, 96 insertions(+), 1 deletion(-)
Approvals: Adam Litke: Looks good to me, approved Sandro Bonazzola: Looks good to me, but someone else must approve Nir Soffer: Verified; Looks good to me, but someone else must approve Jenkins CI: Verified; Looks good to me, but someone else must approve; Passed CI tests Freddy Rolland: Looks good to me, but someone else must approve Ala Hino: Verified
automation@ovirt.org has posted comments on this change.
Change subject: gluster: Allow gluster mount with additional servers ......................................................................
Patch Set 30:
* Update tracker::#1177777::OK * Check TR::#1177777::ERROR, 3.6.0 should not match .*
vdsm-patches@lists.fedorahosted.org