Change in vdsm[master]: [WIP]clean authKey for managers
by lvroyce@linux.vnet.ibm.com
Royce Lv has uploaded a new change for review.
Change subject: [WIP]clean authKey for managers
......................................................................
[WIP]clean authKey for managers
when authKey is omitted, it will use current_process().authKey,
and this key will be inherited when using Process() to start a child.
So no authKey manager is used for child/parent communication.
Change-Id: Iff8a8169d30fc12b212d550474ef18189b5acc8d
Signed-off-by:Royce Lv<lvroyce(a)linux.vnet.ibm.com>
---
M vdsm/supervdsm.py
M vdsm/supervdsmServer.py
2 files changed, 2 insertions(+), 19 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/93/11193/1
diff --git a/vdsm/supervdsm.py b/vdsm/supervdsm.py
index 10abae0..0512e30 100644
--- a/vdsm/supervdsm.py
+++ b/vdsm/supervdsm.py
@@ -117,21 +117,6 @@
for f in (self.pidfile, self.timestamp, self.address):
utils.rmFile(f)
- def _start(self):
- self._authkey = str(uuid.uuid4())
- self._log.debug("Launching Super Vdsm")
-
- # we pass to svdsm filenames and uid. Svdsm will use those filenames
- # to create its internal files and give to the passed uid the
- # permissions to read those files.
- superVdsmCmd = [constants.EXT_PYTHON, SUPERVDSM,
- self._authkey, str(os.getpid()),
- self.pidfile, self.timestamp, self.address,
- str(os.getuid())]
-
- misc.execCmd(superVdsmCmd, sync=False, sudo=True)
- sleep(2)
-
def kill(self):
try:
with open(self.pidfile, "r") as f:
@@ -142,7 +127,6 @@
exc_info=True)
self._cleanOldFiles()
- self._authkey = None
self._manager = None
self._svdsm = None
self._firstLaunch = True
@@ -179,8 +163,7 @@
return False
def _connect(self):
- self._manager = _SuperVdsmManager(address=self.address,
- authkey=self._authkey)
+ self._manager = _SuperVdsmManager(address=self.address)
self._manager.register('instance')
self._manager.register('open')
self._log.debug("Trying to connect to Super Vdsm")
diff --git a/vdsm/supervdsmServer.py b/vdsm/supervdsmServer.py
index 77d2798..d789b4c 100755
--- a/vdsm/supervdsmServer.py
+++ b/vdsm/supervdsmServer.py
@@ -400,7 +400,7 @@
try:
log.debug("Creating remote object manager")
- manager = _SuperVdsmManager(address=address, authkey=authkey)
+ manager = _SuperVdsmManager(address=address)
manager.register('instance', callable=_SuperVdsm)
server = manager.get_server()
--
To view, visit http://gerrit.ovirt.org/11193
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Iff8a8169d30fc12b212d550474ef18189b5acc8d
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Royce Lv <lvroyce(a)linux.vnet.ibm.com>
10 years, 7 months
Change in vdsm[master]: [WIP]remove launch and kill svdsm function from supervdsm.py
by lvroyce@linux.vnet.ibm.com
Royce Lv has uploaded a new change for review.
Change subject: [WIP]remove launch and kill svdsm function from supervdsm.py
......................................................................
[WIP]remove launch and kill svdsm function from supervdsm.py
when vdsm loses privilege, it should not launch and kill a privileged process
Change-Id: Iba310fcd0e82b86b41729dfe7aeeb8fbd56bfaf5
Signed-off-by: Royce Lv<lvroyce(a)linux.vnet.ibm.com>
---
M Makefile.am
M vdsm/supervdsm.py
2 files changed, 3 insertions(+), 28 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/94/11194/1
diff --git a/Makefile.am b/Makefile.am
index f0876a8..a0c8667 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -65,7 +65,8 @@
check-local:
find . -path './.git' -prune -type f -o \
- -name '*.py' -o -name '*.py.in' | xargs $(PYFLAKES)
+ -name '*.py' ! 'vdsmServer.py' -o \
+ -name '*.py.in' | xargs $(PYFLAKES)
$(PEP8) --exclude="$(PEP8_BLACKLIST)" --filename '*.py,*.py.in' \
$(PEP8_WHITELIST)
@if test -f .gitignore; then \
diff --git a/vdsm/supervdsm.py b/vdsm/supervdsm.py
index 0512e30..df0c8a3 100644
--- a/vdsm/supervdsm.py
+++ b/vdsm/supervdsm.py
@@ -51,7 +51,6 @@
PIDFILE = os.path.join(constants.P_VDSM_RUN, "svdsm.pid")
TIMESTAMP = os.path.join(constants.P_VDSM_RUN, "svdsm.time")
ADDRESS = os.path.join(constants.P_VDSM_RUN, "svdsm.sock")
-SUPERVDSM = __supervdsmServerPath()
extraPythonPathList = []
@@ -75,7 +74,7 @@
# don't care that isRunning will run twice
with self._supervdsmProxy.proxyLock:
if not self._supervdsmProxy.isRunning():
- self._supervdsmProxy.launch()
+ self._supervdsmProxy.connect()
try:
return callMethod()
@@ -110,26 +109,6 @@
def open(self, *args, **kwargs):
return self._manager.open(*args, **kwargs)
-
- def _cleanOldFiles(self):
- self._log.debug("Cleanning svdsm old files: %s, %s, %s",
- self.pidfile, self.timestamp, self.address)
- for f in (self.pidfile, self.timestamp, self.address):
- utils.rmFile(f)
-
- def kill(self):
- try:
- with open(self.pidfile, "r") as f:
- pid = int(f.read().strip())
- misc.execCmd([constants.EXT_KILL, "-9", str(pid)], sudo=True)
- except Exception:
- self._log.error("Could not kill old Super Vdsm %s",
- exc_info=True)
-
- self._cleanOldFiles()
- self._manager = None
- self._svdsm = None
- self._firstLaunch = True
def isRunning(self):
if self._firstLaunch or self._svdsm is None:
@@ -173,11 +152,6 @@
self._log.warn("Connect to svdsm failed %s", ex)
raise
self._svdsm = self._manager.instance()
-
- def launch(self):
- self._firstLaunch = False
- self._start()
- utils.retry(self._connect, Exception, timeout=60)
def __getattr__(self, name):
return ProxyCaller(self, name)
--
To view, visit http://gerrit.ovirt.org/11194
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Iba310fcd0e82b86b41729dfe7aeeb8fbd56bfaf5
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Royce Lv <lvroyce(a)linux.vnet.ibm.com>
10 years, 7 months
Change in vdsm[master]: change of the isRunning function
by lvroyce@linux.vnet.ibm.com
Hello Zhou Zheng Sheng,
I'd like you to do a code review. Please visit
http://gerrit.ovirt.org/10236
to review the following change.
Change subject: change of the isRunning function
......................................................................
change of the isRunning function
isRunning() now checks the supervdsm pid; this does not guarantee that
supervdsmServer is really listening and that the manager framework works right.
Since ping() is a call to the local manager, it won't take much time.
Only ping() can identify whether supervdsm works fine.
Change isRunning() to use ping() also eliminate Authentication error
usecases:
1.first launch: ping fails because of _svdsm=None, launch supervdsm
2.previous _svdsm: ping fails with AuthenticationError, kill and launch
3.previous svdsm exit:ping fails with socket error,launch
4.call error: raise to caller
5.ping succeed,svdsm killed when callmethod: raise error to caller
Change-Id: Ib25809d4416f26bc95dc72e7b32b8b2a17a71879
Signed-off-by:Royce Lv<lvroyce(a)linux.vnet.ibm.com>
Signed-off-by:Zhou Zheng Sheng<zhshzhou(a)linux.vnet.ibm.com>
---
M tests/superVdsmTests.py
M vdsm/supervdsm.py
M vdsm/supervdsmServer.py
3 files changed, 24 insertions(+), 79 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/36/10236/1
diff --git a/tests/superVdsmTests.py b/tests/superVdsmTests.py
index 87de66d..57f4de8 100644
--- a/tests/superVdsmTests.py
+++ b/tests/superVdsmTests.py
@@ -28,7 +28,7 @@
superVdsmCmd = [getNeededPythonPath(), constants.EXT_PYTHON,
supervdsm.SUPERVDSM,
self._authkey, str(os.getpid()),
- self.pidfile, self.timestamp, self.address,
+ self.pidfile, self.address,
str(os.getuid())]
misc.execCmd(superVdsmCmd, sync=False, sudo=True)
sleep(2)
@@ -41,33 +41,24 @@
# temporary values to run temporary svdsm
self.pidfd, pidfile = tempfile.mkstemp()
- self.timefd, timestamp = tempfile.mkstemp()
self.addfd, address = tempfile.mkstemp()
- self._proxy.setIPCPaths(pidfile, timestamp, address)
+ self._proxy.setIPCPaths(pidfile, address)
def tearDown(self):
supervdsm.extraPythonPathList = []
- for fd in (self.pidfd, self.timefd, self.addfd):
- os.close(fd)
+ os.close(self.pidfd)
self._proxy.kill() # cleanning old temp files
@MonkeyPatch(supervdsm.SuperVdsmProxy, '_start', monkeyStart)
def testIsSuperUp(self):
- self._proxy.ping() # this call initiate svdsm
- self.assertTrue(self._proxy.isRunning())
+ self._proxy.ping()
+ self.assertTrue(self._proxy.isRunning()) # this call initiate svdsm
@MonkeyPatch(supervdsm.SuperVdsmProxy, '_start', monkeyStart)
def testKillSuper(self):
self._proxy.ping()
self._proxy.kill()
- self.assertFalse(self._proxy.isRunning())
+ self.assertRaises(AttributeError, self._proxy.isRunning)
self._proxy.ping() # Launching vdsm after kill
self.assertTrue(self._proxy.isRunning())
-
- @MonkeyPatch(supervdsm.SuperVdsmProxy, '_start', monkeyStart)
- def testNoPidFile(self):
- self._proxy.ping() # svdsm is up
- self.assertTrue(self._proxy.isRunning())
- utils.rmFile(self._proxy.timestamp)
- self.assertRaises(IOError, self._proxy.isRunning)
diff --git a/vdsm/supervdsm.py b/vdsm/supervdsm.py
index 532d5ac..a9bbdf6 100644
--- a/vdsm/supervdsm.py
+++ b/vdsm/supervdsm.py
@@ -20,13 +20,11 @@
#
import os
-from multiprocessing import AuthenticationError
from multiprocessing.managers import BaseManager
import logging
import threading
import uuid
from time import sleep
-from errno import ENOENT, ESRCH
import storage.misc as misc
from vdsm import constants, utils
@@ -45,7 +43,6 @@
raise RuntimeError("SuperVDSM Server not found")
PIDFILE = os.path.join(constants.P_VDSM_RUN, "svdsm.pid")
-TIMESTAMP = os.path.join(constants.P_VDSM_RUN, "svdsm.time")
ADDRESS = os.path.join(constants.P_VDSM_RUN, "svdsm.sock")
SUPERVDSM = __supervdsmServerPath("supervdsmServer.py")
@@ -66,22 +63,16 @@
callMethod = lambda: \
getattr(self._supervdsmProxy._svdsm, self._funcName)(*args,
**kwargs)
- if not self._supervdsmProxy.isRunning():
- # getting inside only when svdsm is down. its rare case so we
- # don't care that isRunning will run twice
- with self._supervdsmProxy.proxyLock:
- if not self._supervdsmProxy.isRunning():
- self._supervdsmProxy.launch()
- try:
- return callMethod()
- # handling internal exception that we raise to identify supervdsm
- # validation. only this exception can cause kill!
- except AuthenticationError:
- with self._supervdsmProxy.proxyLock:
+ with self._supervdsmProxy.proxyLock:
+ try:
+ # make sure supervdsmServer works right
+ self._supervdsmProxy.isRunning()
+ except:
self._supervdsmProxy.kill()
self._supervdsmProxy.launch()
- return callMethod()
+
+ return callMethod()
class SuperVdsmProxy(object):
@@ -92,25 +83,19 @@
def __init__(self):
self.proxyLock = threading.Lock()
- self._firstLaunch = True
+ self.setIPCPaths(PIDFILE, ADDRESS)
- # Declaration of public variables that keep files' names that svdsm
- # uses. We need to be able to change these variables so that running
- # tests doesn't disturb and already running VDSM on the host.
- self.setIPCPaths(PIDFILE, TIMESTAMP, ADDRESS)
-
- def setIPCPaths(self, pidfile, timestamp, address):
+ def setIPCPaths(self, pidfile, address):
self.pidfile = pidfile
- self.timestamp = timestamp
self.address = address
def open(self, *args, **kwargs):
return self._manager.open(*args, **kwargs)
def _cleanOldFiles(self):
- self._log.debug("Cleanning svdsm old files: %s, %s, %s",
- self.pidfile, self.timestamp, self.address)
- for f in (self.pidfile, self.timestamp, self.address):
+ self._log.debug("Cleanning svdsm old files: %s, %s",
+ self.pidfile, self.address)
+ for f in (self.pidfile, self.address):
utils.rmFile(f)
def _start(self):
@@ -122,7 +107,7 @@
# permissions to read those files.
superVdsmCmd = [constants.EXT_PYTHON, SUPERVDSM,
self._authkey, str(os.getpid()),
- self.pidfile, self.timestamp, self.address,
+ self.pidfile, self.address,
str(os.getuid())]
misc.execCmd(superVdsmCmd, sync=False, sudo=True)
@@ -134,42 +119,15 @@
pid = int(f.read().strip())
misc.execCmd([constants.EXT_KILL, "-9", str(pid)], sudo=True)
except Exception:
- self._log.error("Could not kill old Super Vdsm %s",
- exc_info=True)
+ self._log.warn("Could not kill old Super Vdsm")
self._cleanOldFiles()
self._authkey = None
self._manager = None
self._svdsm = None
- self._firstLaunch = True
def isRunning(self):
- try:
- with open(self.pidfile, "r") as f:
- spid = f.read().strip()
- with open(self.timestamp, "r") as f:
- createdTime = f.read().strip()
- except IOError as e:
- # pid file and timestamp file must be exist after first launch,
- # otherwise excpetion will be raised to svdsm caller
- if e.errno == ENOENT and self._firstLaunch:
- return False
- else:
- raise
-
- try:
- pTime = str(misc.getProcCtime(spid))
- except OSError as e:
- if e.errno == ESRCH:
- # Means pid is not exist, svdsm was killed
- return False
- else:
- raise
-
- if pTime == createdTime:
- return True
- else:
- return False
+ return self._svdsm.ping()
def _connect(self):
self._manager = _SuperVdsmManager(address=self.address,
@@ -185,7 +143,6 @@
self._svdsm = self._manager.instance()
def launch(self):
- self._firstLaunch = False
self._start()
utils.retry(self._connect, Exception, timeout=60)
diff --git a/vdsm/supervdsmServer.py b/vdsm/supervdsmServer.py
index 5effd41..8968a82 100755
--- a/vdsm/supervdsmServer.py
+++ b/vdsm/supervdsmServer.py
@@ -336,15 +336,12 @@
sys.exit(errno.EPERM)
log.debug("Parsing cmd args")
- authkey, parentPid, pidfile, timestamp, address, uid = sys.argv[1:]
+ authkey, parentPid, pidfile, address, uid = sys.argv[1:]
- log.debug("Creating PID and TIMESTAMP files: %s, %s",
- pidfile, timestamp)
+ log.debug("Creating PID:%s", pidfile)
spid = os.getpid()
with open(pidfile, "w") as f:
f.write(str(spid) + "\n")
- with open(timestamp, "w") as f:
- f.write(str(misc.getProcCtime(spid) + "\n"))
log.debug("Cleaning old socket %s", address)
if os.path.exists(address):
@@ -369,7 +366,7 @@
servThread.setDaemon(True)
servThread.start()
- for f in (address, timestamp, pidfile):
+ for f in (address, pidfile):
chown(f, int(uid), METADATA_GROUP)
log.debug("Started serving super vdsm object")
--
To view, visit http://gerrit.ovirt.org/10236
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ib25809d4416f26bc95dc72e7b32b8b2a17a71879
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Royce Lv <lvroyce(a)linux.vnet.ibm.com>
Gerrit-Reviewer: Zhou Zheng Sheng <zhshzhou(a)linux.vnet.ibm.com>
10 years, 7 months
Change in vdsm[master]: [WIP]start vdsm as subprocess in supervdsm server
by lvroyce@linux.vnet.ibm.com
Royce Lv has uploaded a new change for review.
Change subject: [WIP]start vdsm as subprocess in supervdsm server
......................................................................
[WIP]start vdsm as subprocess in supervdsm server
supervdsm will run as root; starting vdsm needs to drop privilege.
Export the start-vdsm function and
run it as a subprocess when starting the supervdsm server.
Sleep so that vdsm starts first and log ownership is set right.
Change-Id: I540b1d3f3c823433f100f4803f31322fc7ee2153
Signed-off-by: Royce Lv<lvroyce(a)linux.vnet.ibm.com>
---
M vdsm/supervdsmServer.py
R vdsm/vdsmServer.py
2 files changed, 12 insertions(+), 31 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/91/11191/1
diff --git a/vdsm/supervdsmServer.py b/vdsm/supervdsmServer.py
index 83a5803..d64076b 100755
--- a/vdsm/supervdsmServer.py
+++ b/vdsm/supervdsmServer.py
@@ -423,4 +423,7 @@
sys.exit(1)
if __name__ == '__main__':
+ vdsmProc = Process(target=startVdsm)
+ vdsmProc.start()
+ sleep(2)
main()
diff --git a/vdsm/vdsm b/vdsm/vdsmServer.py
similarity index 77%
rename from vdsm/vdsm
rename to vdsm/vdsmServer.py
old mode 100755
new mode 100644
index 4746a0f..337a4ca
--- a/vdsm/vdsm
+++ b/vdsm/vdsmServer.py
@@ -10,11 +10,8 @@
import os
import sys
-import getopt
import signal
-import getpass
import pwd
-import grp
import threading
import logging
import syslog
@@ -99,19 +96,6 @@
log.info(str(t))
-def parse_args():
- opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])
- for o, v in opts:
- o = o.lower()
- if o == "-h" or o == "--help":
- usage()
- sys.exit(0)
-
- if len(args) >= 1:
- usage()
- sys.exit(1)
-
-
def __assertLogPermission():
if not os.access(constants.P_VDSM_LOG, os.W_OK):
syslog.syslog("vdsm log directory is not accessible")
@@ -127,21 +111,15 @@
sys.exit(1)
-def __assertVdsmUser():
- username = getpass.getuser()
- if username != constants.VDSM_USER:
- syslog.syslog("VDSM failed to start: running user is not %s, trying "
- "to run from user %s" % (constants.VDSM_USER, username))
- sys.exit(1)
- group = grp.getgrnam(constants.VDSM_GROUP)
- if (constants.VDSM_USER not in group.gr_mem) and \
- (pwd.getpwnam(constants.VDSM_USER).pw_gid != group.gr_gid):
- syslog.syslog("VDSM failed to start: vdsm user is not in KVM group")
- sys.exit(1)
+def startVdsm():
+ def dropPrivileges():
+ if os.getuid() != 0:
+ sys.exit(1)
+ vdsm_uid, vdsm_gid = pwd.getpwnam(constants.VDSM_USER)[2:4:]
-if __name__ == '__main__':
- __assertVdsmUser()
+ os.setgroups([])
+ os.setgid(vdsm_gid)
+ os.setuid(vdsm_uid)
+ dropPrivileges()
__assertLogPermission()
- os.setpgrp()
- parse_args()
run()
--
To view, visit http://gerrit.ovirt.org/11191
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I540b1d3f3c823433f100f4803f31322fc7ee2153
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Royce Lv <lvroyce(a)linux.vnet.ibm.com>
10 years, 7 months
Change in vdsm[master]: Don't monitor the usage of '/var/run/vdsm' in diskStats
by wudxw@linux.vnet.ibm.com
Mark Wu has uploaded a new change for review.
Change subject: Don't monitor the usage of '/var/run/vdsm' in diskStats
......................................................................
Don't monitor the usage of '/var/run/vdsm' in diskStats
Engine raises a warning that free space in /var/run/vdsm is less than 1GB.
But actually, '/var/run' is mounted as a tmpfs. By default, the maximum size
is half of the total physical memory. The warning is misleading because user
could find it still has a lot of free space on the disk filesystem.
And 1GB shouldn't be a low threshold for memory. So we needn't monitor the
usage of '/var/run/vdsm' in 'diskStats' since it could be covered by memory
usage monitoring.
Bug-uri: https://bugzilla.redhat.com/show_bug.cgi?id=906788
Change-Id: Idb0a4ae2cf7ceb6297e348d9e90c166373461ca1
Signed-off-by: Mark Wu <wudxw(a)linux.vnet.ibm.com>
---
M vdsm/utils.py
1 file changed, 1 insertion(+), 2 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/75/11675/1
diff --git a/vdsm/utils.py b/vdsm/utils.py
index 35338a0..57c3616 100644
--- a/vdsm/utils.py
+++ b/vdsm/utils.py
@@ -231,8 +231,7 @@
Contains the sate of the host in the time of initialization.
"""
- MONITORED_PATHS = ['/tmp', '/var/log', '/var/log/core',
- constants.P_VDSM_RUN]
+ MONITORED_PATHS = ['/tmp', '/var/log', '/var/log/core']
def _getDiskStats(self):
d = {}
--
To view, visit http://gerrit.ovirt.org/11675
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Idb0a4ae2cf7ceb6297e348d9e90c166373461ca1
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Mark Wu <wudxw(a)linux.vnet.ibm.com>
10 years, 7 months
Change in vdsm[master]: Add ut to supervdsmServer zombie reaper
by lvroyce@linux.vnet.ibm.com
Royce Lv has uploaded a new change for review.
Change subject: Add ut to supervdsmServer zombie reaper
......................................................................
Add ut to supervdsmServer zombie reaper
Change-Id: I7fb8f3dcd575266febe11967e3f8df7d3588acb8
Signed-off-by: Royce Lv<lvroyce(a)linux.vnet.ibm.com>
---
M tests/superVdsmTests.py
M vdsm/supervdsmServer.py
2 files changed, 12 insertions(+), 7 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/54/9354/1
diff --git a/tests/superVdsmTests.py b/tests/superVdsmTests.py
index 20b1591..cb52ff1 100644
--- a/tests/superVdsmTests.py
+++ b/tests/superVdsmTests.py
@@ -4,6 +4,7 @@
import tempfile
from vdsm import utils
import os
+from time import sleep
class TestSuperVdsm(TestCaseBase):
@@ -16,8 +17,11 @@
self.pidfd, pidfile = tempfile.mkstemp()
self.timefd, timestamp = tempfile.mkstemp()
self.addfd, address = tempfile.mkstemp()
+ self.pidfile = pidfile
self._proxy.setIPCPaths(pidfile, timestamp, address)
+ self._proxy.ping()
+ self.assertTrue(self._proxy.isRunning())
def tearDown(self):
for fd in (self.pidfd, self.timefd, self.addfd):
@@ -25,19 +29,19 @@
self._proxy.kill() # cleanning old temp files
def testIsSuperUp(self):
- self._proxy.ping() # this call initiate svdsm
-
- self.assertTrue(self._proxy.isRunning())
+ pass
def testKillSuper(self):
- self._proxy.ping()
self._proxy.kill()
self.assertFalse(self._proxy.isRunning())
self._proxy.ping() # Launching vdsm after kill
self.assertTrue(self._proxy.isRunning())
def testNoPidFile(self):
- self._proxy.ping() # svdsm is up
- self.assertTrue(self._proxy.isRunning())
utils.rmFile(self._proxy.timestamp)
self.assertFalse(self._proxy.isRunning())
+
+ def testZombieReaper(self):
+ reapedPid = self._proxy.ping()
+ sleep(10) # supervdsmServer checks SIGCHLD every 5 secs
+ self.assertRaises(OSError, os.waitpid, reapedPid, os.WNOHANG)
diff --git a/vdsm/supervdsmServer.py b/vdsm/supervdsmServer.py
index 1287fef..56dd5fd 100755
--- a/vdsm/supervdsmServer.py
+++ b/vdsm/supervdsmServer.py
@@ -92,7 +92,8 @@
@logDecorator
def ping(self, *args, **kwargs):
# This method exists for testing purposes
- return True
+ res = self._runAs(DISKIMAGE_USER, (DISKIMAGE_GROUP,), os.getpid)
+ return res
@logDecorator
def getDevicePartedInfo(self, *args, **kwargs):
--
To view, visit http://gerrit.ovirt.org/9354
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I7fb8f3dcd575266febe11967e3f8df7d3588acb8
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Royce Lv <lvroyce(a)linux.vnet.ibm.com>
10 years, 7 months
Change in vdsm[master]: Get oop from object instance
by shuming@linux.vnet.ibm.com
Shu Ming has uploaded a new change for review.
Change subject: Get oop from object instance
......................................................................
Get oop from object instance
In FileStorageDomain non-class method, self.oop should
be used instead of oop
Change-Id: Ia6b86f2c5e382774003772837e32bd62f8ad29ce
Signed-off-by: Shu Ming <shuming(a)linux.vnet.ibm.com>
---
M vdsm/storage/fileSD.py
1 file changed, 1 insertion(+), 1 deletion(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/20/8920/1
diff --git a/vdsm/storage/fileSD.py b/vdsm/storage/fileSD.py
index f92ed98..7d6e1ab 100644
--- a/vdsm/storage/fileSD.py
+++ b/vdsm/storage/fileSD.py
@@ -177,7 +177,7 @@
def getReadDelay(self):
t = time.time()
- oop.getProcessPool(self.sdUUID).directReadLines(self.metafile)
+ self.oop.getProcessPool(self.sdUUID).directReadLines(self.metafile)
return time.time() - t
def getFileList(self, pattern, caseSensitive):
--
To view, visit http://gerrit.ovirt.org/8920
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ia6b86f2c5e382774003772837e32bd62f8ad29ce
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Shu Ming <shuming(a)linux.vnet.ibm.com>
10 years, 7 months
Change in vdsm[master]: gluster: VDSM Gluster verb to set swift configuration
by avishwan@redhat.com
Aravinda VK has uploaded a new change for review.
Change subject: gluster: VDSM Gluster verb to set swift configuration
......................................................................
gluster: VDSM Gluster verb to set swift configuration
VDSM Gluster verb for UFO(Unified File Object) configuration
manage.(setting the swift configuration)
glusterSwiftConfigSet: Set the config items from swift
proxy/account/object/container server
Change-Id: Ibba9c30d1ad4926c88e58cb4bc9a858b8a07f77b
Signed-off-by: Aravinda VK <avishwan(a)redhat.com>
---
M vdsm/gluster/api.py
M vdsm/gluster/exception.py
M vdsm/gluster/swift.py
M vdsm_cli/vdsClientGluster.py
4 files changed, 95 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/83/13383/1
diff --git a/vdsm/gluster/api.py b/vdsm/gluster/api.py
index bb0666f..e6ea775 100644
--- a/vdsm/gluster/api.py
+++ b/vdsm/gluster/api.py
@@ -225,6 +225,11 @@
configOption)
return {"config": configValues}
+ @exportAsVerb
+ def swiftConfigSet(self, serverType, configDict, options=None):
+ self.svdsmProxy.glusterSwiftConfigSet(serverType,
+ configDict)
+
def getGlusterMethods(gluster):
l = []
diff --git a/vdsm/gluster/exception.py b/vdsm/gluster/exception.py
index 89421d4..c66e4b2 100644
--- a/vdsm/gluster/exception.py
+++ b/vdsm/gluster/exception.py
@@ -419,3 +419,8 @@
def __init__(self, opt=""):
self.err = ["Invalid Swift Config option: %s" % opt]
+
+
+class GlusterSwiftConfigWriteFailedException(GlusterGeneralException):
+ code = 4412
+ message = "Swift Config file write failed"
diff --git a/vdsm/gluster/swift.py b/vdsm/gluster/swift.py
index 4442dd5..6dc6f5d 100644
--- a/vdsm/gluster/swift.py
+++ b/vdsm/gluster/swift.py
@@ -19,6 +19,8 @@
#
from ConfigParser import DEFAULTSECT, ConfigParser
+import tempfile
+import shutil
from . import exportToSuperVdsm
import exception as ge
@@ -104,3 +106,60 @@
@exportToSuperVdsm
def swiftConfigGet(serverType, section=None, configOption=None):
return _configGet(serverType, section, configOption)
+
+
+def _writeSwiftConfigFile(serverType, config):
+ config_file = SWIFT_CONFIG_FILES[serverType]
+ try:
+ # Write to a temp file
+ tempConfigFile = tempfile.NamedTemporaryFile(mode="wb", delete=False)
+ with open(tempConfigFile.name, 'wb') as configFile:
+ config.write(configFile)
+
+ # If src and dst are two different file system, then os.rename
+ # fails, In this case if temp file created in /tmp and if /tmp is
+ # seperate fs then os.rename gives following error, so use shutil
+ # OSError: [Errno 18] Invalid cross-device link
+ # mail.python.org/pipermail/python-list/2005-February/342893.html
+ shutil.move(tempConfigFile.name, config_file)
+ except IOError as e:
+ errMsg = "[Errno %s] %s: '%s'" % (e.errno, e.strerror, e.filename)
+ raise ge.GlusterSwiftConfigWriteFailedException(err=[errMsg])
+
+
+def _configSet(serverType, configDict):
+ """
+ Set config values in swift config files
+ :param serverType Type of swift server(gluster-swift-proxy,
+ gluster-swift-account, gluster-swift-object, gluster-swift-container)
+ :param configDict Dict of items to update with section name as key
+ Ex: {"section1": {"configOpt1": "value1", "configOpt2": "value2"},..}
+ """
+ config = _openSwiftConfigFile(serverType)
+
+ # Every section query automatically includes DEFAULTS options,
+ # to avoid this remove default options and reconstruct at the end
+ defaultValues = dict(config.defaults())
+ config._defaults = {}
+
+ for section in configDict:
+ if not config.has_section(section) and section.upper() != DEFAULTSECT:
+ config.add_section(section)
+
+ for configOption in configDict[section]:
+ value = configDict[section][configOption]
+ # Since default values seperated from main config object
+ # update defaultValues dict instead of config object
+ if section == DEFAULTSECT:
+ defaultValues[configOption] = value
+ else:
+ config.set(section, configOption, value)
+
+ # Restore defaults
+ config._defaults = defaultValues
+ _writeSwiftConfigFile(serverType, config)
+
+
+@exportToSuperVdsm
+def swiftConfigSet(serverType, configDict):
+ _configSet(serverType, configDict)
diff --git a/vdsm_cli/vdsClientGluster.py b/vdsm_cli/vdsClientGluster.py
index ba1af88..6f80eb0 100644
--- a/vdsm_cli/vdsClientGluster.py
+++ b/vdsm_cli/vdsClientGluster.py
@@ -18,6 +18,7 @@
#
import pprint as pp
+import ast
from vdsClient import service
@@ -325,6 +326,22 @@
pp.pprint(status)
return status['status']['code'], status['status']['message']
+ def do_glusterSwiftConfigSet(self, args):
+ params = self._eqSplit(args)
+ serverType = params.get('serverType', '')
+ if serverType == "":
+ raise ValueError
+ configDictStr = params.get('configDict', '{}')
+
+ try:
+ configDict = ast.literal_eval(configDictStr)
+ except:
+ raise ValueError
+
+ status = self.s.glusterSwiftConfigSet(serverType, configDict)
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
def getGlusterCmdDict(serv):
return \
@@ -546,4 +563,13 @@
'configOption is the config item in respective config file',
'returns the Swift config values'
)),
+ 'glusterSwiftConfigSet': (
+ serv.do_glusterSwiftConfigSet,
+ ('serverType=<serverType> configDict=<configDict>\n\t',
+ 'serverType is the type of swift service(proxy-server, '
+ 'object-server, account-server, container-server)',
+ 'configDict dict of config items which needs update'
+ '{"section1": {"key1": "value1", "key2": "value2"}..}',
+ 'Updates the Swift configuration file'
+ )),
}
--
To view, visit http://gerrit.ovirt.org/13383
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ibba9c30d1ad4926c88e58cb4bc9a858b8a07f77b
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Aravinda VK <avishwan(a)redhat.com>
10 years, 7 months
Change in vdsm[master]: vdsm: Gluster UFO verbs
by avishwan@redhat.com
Aravinda VK has uploaded a new change for review.
Change subject: vdsm: Gluster UFO verbs
......................................................................
vdsm: Gluster UFO verbs
VDSM Gluster verbs for UFO(Unified File Object) using Swift
services.
glusterGetServices: To get the status of all UFO services
(glusterd, smb, memcached, swift)
glusterManageServices: To start/stop/restart UFO related
services(glusterd, smb, memcached, swift)
glusterGetSwiftConfig: Get swift config items
glusterSetSwiftConfig: Set the config items
Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=850443
Change-Id: Ie966fb515275a0768f67cbbe2055a07002355327
Signed-off-by: Aravinda VK <avishwan(a)redhat.com>
---
M vdsm/gluster/api.py
M vdsm/gluster/cli.py
M vdsm/gluster/exception.py
M vdsm_cli/vdsClientGluster.py
4 files changed, 360 insertions(+), 2 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/64/10864/1
diff --git a/vdsm/gluster/api.py b/vdsm/gluster/api.py
index 5f0b0ed..eb760b6 100644
--- a/vdsm/gluster/api.py
+++ b/vdsm/gluster/api.py
@@ -216,6 +216,27 @@
status = self.svdsmProxy.glusterVolumeProfileInfo(volumeName, nfs)
return {'profileInfo': status}
+ @exportAsVerb
+ def getSwiftConfig(self, serverType, section=None, configOpt=None):
+ section = None if section == "" else section
+ configOpt = None if configOpt == "" else configOpt
+ return self.svdsmProxy.glusterGetSwiftConfig(serverType,
+ section,
+ configOpt)
+
+ @exportAsVerb
+ def setSwiftConfig(self, serverType, configDict):
+ return self.svdsmProxy.glusterSetSwiftConfig(serverType,
+ configDict)
+
+ @exportAsVerb
+ def manageServices(self, servicesDict):
+ return self.svdsmProxy.glusterManageServices(servicesDict)
+
+ @exportAsVerb
+ def getServices(self):
+ return self.svdsmProxy.glusterGetServices()
+
def getGlusterMethods(gluster):
l = []
diff --git a/vdsm/gluster/cli.py b/vdsm/gluster/cli.py
index 7136281..58d8890 100644
--- a/vdsm/gluster/cli.py
+++ b/vdsm/gluster/cli.py
@@ -25,10 +25,30 @@
from vdsm import netinfo
import exception as ge
from hostname import getHostNameFqdn, HostNameException
+import ConfigParser
+import re
_glusterCommandPath = utils.CommandPath("gluster",
"/usr/sbin/gluster",
)
+
+statusTypes = {"INACTIVE": "STOPPED",
+ "STOPPED": "STOPPED",
+ "ACTIVE": "RUNNING",
+ "RUNNING": "RUNNING",
+ "FAILED": "FAILED"}
+
+CONFIG_FILES = {"proxy-server": "/etc/swift/proxy-server.conf",
+ "account-server": "/etc/swift/account-server/1.conf",
+ "object-server": "/etc/swift/object-server/1.conf",
+ "container-server": "/etc/swift/container-server/1.conf"}
+
+SUPPORTED_GLUSTER_SERVICES = ["glusterd", "memcached", "swift", "smb"]
+
+SWIFT_SERVICES = ["proxy-server",
+ "container-server",
+ "account-server",
+ "object-server"]
def _getGlusterVolCmd():
@@ -37,6 +57,13 @@
def _getGlusterPeerCmd():
return [_glusterCommandPath.cmd, "--mode=script", "peer"]
+
+
+def _getGlusterServiceCmd(serviceName, action):
+ if serviceName == "swift":
+ return ["/usr/bin/swift-init", "main", action]
+ else:
+ return ["/sbin/service", serviceName, action]
def exportToSuperVdsm(func):
@@ -880,3 +907,225 @@
return _parseVolumeProfileInfo(xmltree, nfs)
except (etree.ParseError, AttributeError, ValueError):
raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
+
+
+def _parseSwiftServiceStatus(statusStr):
+ statusLines = statusStr.split("\n")
+ swiftStatus = []
+ noServicesRunningPat = "No\s(%s)" % "|".join(SWIFT_SERVICES)
+ servicesRunningPat = "(%s) running \((\d+)\s+" % "|".join(SWIFT_SERVICES)
+
+ for line in statusLines:
+ noServiceRunningMatch = re.search(noServicesRunningPat,
+ line,
+ re.IGNORECASE)
+ serviceRunningMatch = re.search(servicesRunningPat,
+ line,
+ re.IGNORECASE)
+
+ if noServiceRunningMatch:
+ swiftStatus.append({"name": noServiceRunningMatch.group(1),
+ "pid": "",
+ "status": "STOPPED"})
+ elif serviceRunningMatch:
+ swiftStatus.append({"name": serviceRunningMatch.group(1),
+ "pid": serviceRunningMatch.group(2),
+ "status": "RUNNING"})
+
+ if len(swiftStatus) == 0:
+ for service in SWIFT_SERVICES:
+ swiftStatus.append({"name": service,
+ "status": "STOPPED",
+ "pid": ""})
+
+ return swiftStatus
+
+
+def _parseServiceStatus(name, statusStr):
+ """
+ Sample output:
+ case 1: running
+ glusterd (pid 15943) is running...
+
+ case2: stopped
+ glusterd is stopped
+ """
+ lines = statusStr.split("\n")
+ serviceStatus = {"name": name, "status": "STOPPED", "pid": ""}
+ m = re.search("\(PID\s+(\d+)\).+(RUNNING)", statusStr, re.IGNORECASE)
+ if m:
+ serviceStatus["pid"] = m.group(1)
+ serviceStatus["status"] = statusTypes[m.group(2).upper()]
+ elif re.search("FAILED", statusStr.upper()):
+ serviceStatus["status"] = statusTypes["FAILED"]
+
+ return [serviceStatus]
+
+
+def _parseSystemCtlStatus(name, statusStr):
+ serviceStatus = {"name": name, "status": "STOPPED", "pid": ""}
+ lines = statusStr.split("\n")
+ for line in lines:
+ statusMatch = re.search("Active:\s+(\S+)\s", line, re.IGNORECASE)
+ pidMatch = re.search("Main PID:\s+(\S+)\s", line, re.IGNORECASE)
+ if statusMatch:
+ serviceStatus["status"] = statusTypes[statusMatch.group(1).upper()]
+ elif pidMatch and serviceStatus["status"] == "RUNNING":
+ serviceStatus["pid"] = pidMatch.group(1)
+
+ return [serviceStatus]
+
+
+def _getConfigItemsAsDict(items):
+ configItems = {}
+ for item in items:
+ configItems[item[0]] = item[1]
+ return configItems
+
+
+def _parseStatus(serviceName, statusStr):
+ if serviceName == "swift":
+ status = _parseSwiftServiceStatus(statusStr)
+ elif re.search("Loaded:", statusStr, re.IGNORECASE):
+ status = _parseSystemCtlStatus(serviceName, statusStr)
+ else:
+ status = _parseServiceStatus(serviceName, statusStr)
+
+ return status
+
+
+@exportToSuperVdsm
+def manageServices(serviceDict):
+ """
+ Performs start/stop/restart the gluster related services
+
+ :param servicesDict dict with serviceName as key and action as value
+ Ex: {"glusterd": "start", "swift": "stop"}
+ :returns {"value": True}
+ """
+ serviceErrorMsg = ["Service not supported"]
+ for serviceName in serviceDict:
+ action = serviceDict[serviceName]
+
+ # Allowing to start any service can have side effects
+ if serviceName not in SUPPORTED_GLUSTER_SERVICES:
+ raise ge.ManageGlusterServicesFailedException(1,
+ [""],
+ serviceErrorMsg)
+
+ cmd = _getGlusterServiceCmd(serviceName, action)
+
+ rc, out, err = _execGluster(cmd)
+ return {"value": True}
+
+
+@exportToSuperVdsm
+def getServices():
+ """
+ Returns status of all gluster services
+
+ :param None
+ :returns list of dict with PID and status details for each service
+ Ex: {"value": [{"name": "glusterd", "status": "RUNNING", pid: "1027"},..]}
+ """
+ serviceStatus = []
+
+ for serviceName in SUPPORTED_GLUSTER_SERVICES:
+ rc, out, err = _execGluster(_getGlusterServiceCmd(serviceName,
+ "status"))
+ serviceStatus += _parseStatus(serviceName, "\n".join(out))
+
+ return {"value": serviceStatus}
+
+
+@exportToSuperVdsm
+def getSwiftConfig(serverType, section=None, configOption=None):
+ """
+ Get values from the Swift config files. If section is None, return all
+ the config items from the file; otherwise return config items only from
+ that section.
+ If both section and configOption are set, return the respective config
+ item; otherwise return all config items.
+
+ :param serverType Type of swift server(proxy-server, account-server,
+ object-server, container-server)
+ :param section (Optional) Section in config file
+ :param configOption (Optional) Config item in a section
+ :returns Dict with section name as keys
+ Ex: {"value": {"section": {"key1": "value1", "key2": "value2"}}}
+ """
+ config = ConfigParser.ConfigParser()
+ config_file = CONFIG_FILES[serverType]
+ config.readfp(open(config_file))
+ configValues = {}
+
+ # Since config.items or config.get for any section will include default
+ # values in the resulting items list of a section. To avoid including
+ # default options in every section
+ defaultValues = config.defaults()
+ config._defaults = {}
+
+ if section == None:
+ sections = config.sections()
+ configValues['DEFAULT'] = defaultValues
+
+ for section in sections:
+ items = _getConfigItemsAsDict(config.items(section))
+ configValues[section] = items
+ elif configOption == None:
+ if section.upper() == "DEFAULT":
+ configValues[section] = defaultValues
+ else:
+ items = {}
+ if config.has_section(section):
+ items = _getConfigItemsAsDict(config.items(section))
+ configValues[section] = items
+ elif config.has_option(section, configOption):
+ value = config.get(section, configOption)
+ configValues = {section: {configOption: value}}
+ else:
+ configValues = {section: {configOption: ""}}
+
+ return {"value": configValues}
+
+
+@exportToSuperVdsm
+def setSwiftConfig(serverType, configDict):
+ """
+ Set config values in swift config files
+ :param serverType Type of swift server(proxy-server, account-server,
+ object-server, container-server)
+ :param configDict Dict of items to update with section name as key
+ Ex: {"section1": {"configOpt1": "value1", "configOpt2": "value2"},..}
+ :returns Sets the config item and returns a configChanged flag to indicate
+ whether at least one config item was changed, which helps the engine decide
+ whether a swift restart is required. Ex: {"value": {"configChanged": True}}
+ """
+ config = ConfigParser.ConfigParser()
+ config_file = CONFIG_FILES[serverType]
+ config.readfp(open(config_file))
+ configChanged = False
+
+ for section in configDict:
+ if not config.has_section(section) and section.upper() != "DEFAULT":
+ config.add_section(section)
+
+ for configOption in configDict[section]:
+ value = configDict[section][configOption]
+ if config.has_option(section, configOption):
+ curr_val = config.get(section, configOption)
+ else:
+ curr_val = None
+
+ if curr_val != value:
+ config.set(section, configOption, str(value))
+ configChanged = True
+
+ # Update the config file only if at least one config
+ # item was updated
+ if configChanged:
+ f = open(config_file, "wb")
+ config.write(f)
+ f.close()
+
+ return {"value": {"configChanged": configChanged}}
diff --git a/vdsm/gluster/exception.py b/vdsm/gluster/exception.py
index e921d7d..0d520b5 100644
--- a/vdsm/gluster/exception.py
+++ b/vdsm/gluster/exception.py
@@ -390,3 +390,8 @@
class GlusterHostsListFailedException(GlusterHostException):
code = 4407
message = "Hosts list failed"
+
+
+class ManageGlusterServicesFailedException(GlusterException):
+ code = 4408
+ message = "start/stop/restart of Gluster services failed"
diff --git a/vdsm_cli/vdsClientGluster.py b/vdsm_cli/vdsClientGluster.py
index be47696..e7ae180 100644
--- a/vdsm_cli/vdsClientGluster.py
+++ b/vdsm_cli/vdsClientGluster.py
@@ -18,7 +18,7 @@
#
import pprint as pp
-
+import json
from vdsClient import service
@@ -272,6 +272,56 @@
pp.pprint(status)
return status['status']['code'], status['status']['message']
+ def do_glusterGetSwiftConfig(self, args):
+ params = self._eqSplit(args)
+ serverType = params.get('serverType', '')
+ if serverType == "":
+ raise ValueError
+
+ section = params.get('section', '')
+ configOption = params.get('configOption', '')
+
+ status = self.s.glusterGetSwiftConfig(serverType,
+ section,
+ configOption)
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterSetSwiftConfig(self, args):
+ params = self._eqSplit(args)
+ serverType = params.get('serverType', '')
+ if serverType == "":
+ raise ValueError
+ configDictStr = params.get('configDict', '{}')
+
+ try:
+ configDict = json.loads(configDictStr)
+ except:
+ raise ValueError
+
+ status = self.s.glusterSetSwiftConfig(serverType, configDict)
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterManageServices(self, args):
+ params = self._eqSplit(args)
+ servicesDictStr = params.get('servicesDict', '')
+ if servicesDictStr == "":
+ raise ValueError
+ try:
+ servicesDict = json.loads(servicesDictStr)
+ except:
+ raise ValueError
+
+ status = self.s.glusterManageServices(servicesDict)
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
+ def do_glusterGetServices(self, args):
+ status = self.s.glusterGetServices()
+ pp.pprint(status)
+ return status['status']['code'], status['status']['message']
+
def getGlusterCmdDict(serv):
return \
@@ -468,4 +518,37 @@
('volumeName=<volume_name> [nfs={yes|no}]\n\t'
'<volume_name> is existing volume name',
'get gluster volume profile info'
- )), }
+ )),
+ 'glusterGetSwiftConfig': (
+ serv.do_glusterGetSwiftConfig,
+ ('serverType=<serverType> [section=<section> '
+ '[configOption=<configOption>]]\n\t',
+ 'serverType is the type of swift service(proxy-server, '
+ 'object-server, account-server, container-server)',
+ 'section is the section in the config file',
+ 'configOption is the config item in respective config file',
+ 'returns the Swift config values'
+ )),
+ 'glusterSetSwiftConfig': (
+ serv.do_glusterSetSwiftConfig,
+ ('serverType=<serverType> configDict=<configDict>\n\t',
+ 'serverType is the type of swift service(proxy-server, '
+ 'object-server, account-server, container-server)',
+ 'configDict dict of config items which needs update'
+ '{"section1": {"key1": "value1", "key2": "value2"}..}',
+ 'Updates the Swift configuration file'
+ )),
+ 'glusterManageServices': (
+ serv.do_glusterManageServices,
+ ('servicesDict=<servicesDict>\n\t',
+ 'servicesDict is dict of services with service name as key and '
+ 'action as value( Ex: {"glusterd": "start", "swift": "stop"})',
+ 'Performs start/stop/restart of gluster services'
+ )),
+ 'glusterGetServices': (
+ serv.do_glusterGetServices,
+ ('',
+ 'Returns status of all gluster services'
+ '(swift, glusterd, smb, memcached)'
+ )),
+ }
--
To view, visit http://gerrit.ovirt.org/10864
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ie966fb515275a0768f67cbbe2055a07002355327
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Aravinda VK <avishwan(a)redhat.com>
10 years, 7 months
Change in vdsm[master]: vdsm: Add after_vdsm_start hook
by vvolansk@redhat.com
Vered Volansky has uploaded a new change for review.
Change subject: vdsm: Add after_vdsm_start hook
......................................................................
vdsm: Add after_vdsm_start hook
Bug-Url: https://bugzilla.redhat.com/842674
Change-Id: Id02b2b7d091ffa0be57a3850fa97cdcd35778466
Signed-off-by: Vered Volansky <vvolansk(a)redhat.com>
---
M vdsm.spec.in
M vdsm/hooks.py
M vdsm/vdsmd.8.in
M vdsm/vdsmd.init.in
M vdsm_hooks/Makefile.am
5 files changed, 13 insertions(+), 4 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/43/13543/1
diff --git a/vdsm.spec.in b/vdsm.spec.in
index dbb9d04..f01e348 100644
--- a/vdsm.spec.in
+++ b/vdsm.spec.in
@@ -821,6 +821,7 @@
%dir %{_libexecdir}/%{vdsm_name}/hooks/before_disk_hotunplug
%dir %{_libexecdir}/%{vdsm_name}/hooks/after_disk_hotunplug
%dir %{_libexecdir}/%{vdsm_name}/hooks/before_vdsm_start
+%dir %{_libexecdir}/%{vdsm_name}/hooks/after_vdsm_start
%dir %{_libexecdir}/%{vdsm_name}/hooks/after_vdsm_stop
%{_datadir}/%{vdsm_name}/addNetwork
%{_datadir}/%{vdsm_name}/configNetwork.py*
diff --git a/vdsm/hooks.py b/vdsm/hooks.py
index 95516a8..b18a4af 100644
--- a/vdsm/hooks.py
+++ b/vdsm/hooks.py
@@ -221,6 +221,10 @@
return _runHooksDir(None, 'before_vdsm_start', raiseError=False)
+def after_vdsm_start():
+ return _runHooksDir(None, 'after_vdsm_start', raiseError=False)
+
+
def after_vdsm_stop():
return _runHooksDir(None, 'after_vdsm_stop', raiseError=False)
diff --git a/vdsm/vdsmd.8.in b/vdsm/vdsmd.8.in
index 9a1a268..7f62ceb 100644
--- a/vdsm/vdsmd.8.in
+++ b/vdsm/vdsmd.8.in
@@ -51,15 +51,16 @@
before_nic_hotunplug, after_nic_hotunplug, after_nic_unhotplug_fail,
before_disk_hotplug, after_disk_hotplug,
before_disk_hotunplug, after_disk_hotunplug,
- before_vdsm_start, after_vdsm_stop.
+ before_vdsm_start, after_vdsm_start, after_vdsm_stop.
Each hook executes the scripts under
.FN /usr/libexec/vdsm/hooks/<hook-name>/
in lexicographic order.
.SS Hook environment
-Each hook script (except before_vdsm_start and after_vdsm_stop) inherit the
-environment of the VDSM process, with an additional variable
+Each hook script (except before_vdsm_start, after_vdsm_start,
+and after_vdsm_stop) inherits the environment of the VDSM process,
+with an additional variable
.B _hook_domxml
which holds the path of libvirt's
.B domain xml
@@ -84,7 +85,7 @@
with a set of params passed by the caller of setVmTicket.
.SS Hook execution
-before_vdsm_start and after_vdsm_stop scripts are executed as user
+before_vdsm_start, after_vdsm_start, and after_vdsm_stop scripts are executed as user
.I root.
All the other hooks are executed as user
.I vdsm.
diff --git a/vdsm/vdsmd.init.in b/vdsm/vdsmd.init.in
index c804ce8..f241086 100755
--- a/vdsm/vdsmd.init.in
+++ b/vdsm/vdsmd.init.in
@@ -511,6 +511,8 @@
RETVAL=$?
[ "$RETVAL" -eq 0 ] && log_success_msg $"$prog start" || log_failure_msg $"$prog start"
[ "$RETVAL" -eq 0 ] && touch /var/lock/subsys/vdsmd
+
+ python @VDSMDIR(a)/hooks.pyc after_vdsm_start
}
stop() {
diff --git a/vdsm_hooks/Makefile.am b/vdsm_hooks/Makefile.am
index fa5a795..6e1f1fe 100644
--- a/vdsm_hooks/Makefile.am
+++ b/vdsm_hooks/Makefile.am
@@ -88,6 +88,7 @@
before_disk_hotunplug \
after_disk_hotunplug \
before_vdsm_start \
+ after_vdsm_start \
after_vdsm_stop
all-local: \
--
To view, visit http://gerrit.ovirt.org/13543
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Id02b2b7d091ffa0be57a3850fa97cdcd35778466
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Vered Volansky <vvolansk(a)redhat.com>
10 years, 7 months