Nir Soffer has uploaded a new change for review.
Change subject: rwlock: Replace misc.RWLock
......................................................................
rwlock: Replace misc.RWLock
This patch removes misc.RWLock and replaces it with the simpler
rwlock.RWLock.
The new lock does not support recursive locking or lock demotion, but I
think they are not used by current code - not tested yet.
Change-Id: I9ae6064e8e031339303e64606a70673807c4083a
Signed-off-by: Nir Soffer <nsoffer(a)redhat.com>
---
M vdsm/storage/misc.py
M vdsm/storage/resourceManager.py
2 files changed, 4 insertions(+), 112 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/23/43423/1
diff --git a/vdsm/storage/misc.py b/vdsm/storage/misc.py
index b14fd26..e45fad8 100644
--- a/vdsm/storage/misc.py
+++ b/vdsm/storage/misc.py
@@ -530,115 +530,6 @@
return 0
-class RWLock(object):
- """
- A simple ReadWriteLock implementation.
-
- The lock must be released by the thread that acquired it. Once a thread
- has acquired a lock, the same thread may acquire it again without blocking;
- the thread must release it once for each time it has acquired it. Note that
- lock promotion (acquiring an exclusive lock under a shared lock is
- forbidden and will raise an exception.
-
- The lock puts all requests in a queue. The request is granted when The
- previous one is released.
-
- Each request is represented by a :class:`threading.Event` object. When the
- Event is set the request is granted. This enables multiple callers to wait
- for a request thus implementing a shared lock.
- """
- class _contextLock(object):
- def __init__(self, owner, exclusive):
- self._owner = owner
- self._exclusive = exclusive
-
- def __enter__(self):
- self._owner.acquire(self._exclusive)
-
- def __exit__(self, exc_type, exc_value, traceback):
- self._owner.release()
-
- def __init__(self):
- self._syncRoot = threading.Lock()
- self._queue = Queue.Queue()
- self._currentSharedLock = None
- self._currentState = None
- self._holdingThreads = {}
-
- self.shared = self._contextLock(self, False)
- self.exclusive = self._contextLock(self, True)
-
- def acquireRead(self):
- return self.acquire(False)
-
- def acquireWrite(self):
- return self.acquire(True)
-
- def acquire(self, exclusive):
- currentEvent = None
- currentThread = threading.currentThread()
-
- # Handle reacquiring lock in the same thread
- if currentThread in self._holdingThreads:
- if self._currentState is False and exclusive:
- raise RuntimeError("Lock promotion is forbidden.")
-
- self._holdingThreads[currentThread] += 1
- return
-
- with self._syncRoot:
- # Handle regular acquisition
- if exclusive:
- currentEvent = threading.Event()
- self._currentSharedLock = None
- else:
- if self._currentSharedLock is None:
- self._currentSharedLock = threading.Event()
-
- currentEvent = self._currentSharedLock
-
- try:
- self._queue.put_nowait((currentEvent, exclusive))
- except Queue.Full:
- raise RuntimeError("There are too many objects waiting for "
- "this lock")
-
- if self._queue.unfinished_tasks == 1:
- # Bootstrap the process if needed. A lock is released the when
- # the next request is granted. When there is no one to grant
- # the request you have to grant it yourself.
- event, self._currentState = self._queue.get_nowait()
- event.set()
-
- currentEvent.wait()
-
- self._holdingThreads[currentThread] = 0
-
- def release(self):
- currentThread = threading.currentThread()
-
- if currentThread not in self._holdingThreads:
- raise RuntimeError("Releasing an lock without acquiring it first")
-
- # If in nested lock don't really release
- if self._holdingThreads[currentThread] > 0:
- self._holdingThreads[currentThread] -= 1
- return
-
- del self._holdingThreads[currentThread]
-
- with self._syncRoot:
- self._queue.task_done()
-
- if self._queue.empty():
- self._currentState = None
- return
-
- nextRequest, self._currentState = self._queue.get_nowait()
-
- nextRequest.set()
-
-
class DynamicBarrier(object):
def __init__(self):
self._cond = threading.Condition()
diff --git a/vdsm/storage/resourceManager.py b/vdsm/storage/resourceManager.py
index b1b0dc7..a7e67a2 100644
--- a/vdsm/storage/resourceManager.py
+++ b/vdsm/storage/resourceManager.py
@@ -29,6 +29,7 @@
import storage_exception as se
import misc
from logUtils import SimpleLogAdapter
+from vdsm import rwlock
from vdsm import utils
@@ -287,7 +288,7 @@
self.autoRelease = True
self._isValid = True
- self._syncRoot = misc.RWLock()
+ self._syncRoot = rwlock.RWLock()
def __wrapObj(self):
for attr in dir(self.__wrappedObject):
@@ -380,11 +381,11 @@
"""
def __init__(self, factory):
self.resources = {}
- self.lock = threading.Lock() # misc.RWLock()
+ self.lock = threading.Lock() # rwlock.RWLock()
self.factory = factory
def __init__(self):
- self._syncRoot = misc.RWLock()
+ self._syncRoot = rwlock.RWLock()
self._namespaces = {}
@classmethod
--
To view, visit https://gerrit.ovirt.org/43423
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I9ae6064e8e031339303e64606a70673807c4083a
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Nir Soffer <nsoffer(a)redhat.com>
Nir Soffer has uploaded a new change for review.
Change subject: rwlock: Add simpler RWLock supporting acquire timeout
......................................................................
rwlock: Add simpler RWLock supporting acquire timeout
We have an RWLock implementation in storage.misc, used by the resource
manager. We want to simplify the resource manager, which is way too
complex to understand or maintain.
Patch https://gerrit.ovirt.org/42773 suggests to add a non-blocking
acquire to the current RWLock, needed for the new simple lock manager.
However, adding more code to the current RWLock is not a good idea.
Instead, this patch replaces the current implementation with a simpler
one.
This implementation does not support recursive locking; I'm not sure we
need this, and usually having a recursive lock is a design smell. We
will add it later only if required.
Change-Id: I2466c137c89598772fb46347eb02195916883cac
Signed-off-by: Nir Soffer <nsoffer(a)redhat.com>
---
M debian/vdsm-python.install
M lib/vdsm/Makefile.am
A lib/vdsm/rwlock.py
M tests/rwlock_test.py
M vdsm.spec.in
5 files changed, 95 insertions(+), 1 deletion(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/08/42908/1
diff --git a/debian/vdsm-python.install b/debian/vdsm-python.install
index 57d5033..3047d9f 100644
--- a/debian/vdsm-python.install
+++ b/debian/vdsm-python.install
@@ -28,6 +28,7 @@
./usr/lib/python2.7/dist-packages/vdsm/pthread.py
./usr/lib/python2.7/dist-packages/vdsm/qemuimg.py
./usr/lib/python2.7/dist-packages/vdsm/response.py
+./usr/lib/python2.7/dist-packages/vdsm/rwlock.py
./usr/lib/python2.7/dist-packages/vdsm/schedule.py
./usr/lib/python2.7/dist-packages/vdsm/sslutils.py
./usr/lib/python2.7/dist-packages/vdsm/tool/__init__.py
diff --git a/lib/vdsm/Makefile.am b/lib/vdsm/Makefile.am
index 95e236f..45cef02 100644
--- a/lib/vdsm/Makefile.am
+++ b/lib/vdsm/Makefile.am
@@ -38,6 +38,7 @@
pthread.py \
qemuimg.py \
response.py \
+ rwlock.py \
schedule.py \
sslutils.py \
sysctl.py \
diff --git a/lib/vdsm/rwlock.py b/lib/vdsm/rwlock.py
new file mode 100644
index 0000000..900d91d
--- /dev/null
+++ b/lib/vdsm/rwlock.py
@@ -0,0 +1,91 @@
+#
+# Copyright 2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Refer to the README and COPYING files for full details of the license
+#
+
+from __future__ import absolute_import
+import threading
+
+
+class RWLock(object):
+
+ def __init__(self):
+ self._lock = threading.Lock()
+ self._waiters = []
+ self._readers = set()
+ self._writer = None
+
+ def acquireWrite(self):
+ with self._lock:
+ if self._writer or self._readers or self._waiters:
+ self._wait(True)
+ self._writer = threading.current_thread()
+
+ def acquireRead(self):
+ with self._lock:
+ if self._writer or self._waiters:
+ self._wait(False)
+ self._readers.add(threading.current_thread())
+
+ def release(self):
+ me = threading.current_thread()
+ with self._lock:
+ if self._writer:
+ if self._writer is not me:
+ raise RuntimeError("Thread %s attempted to release a "
+ "write lock held by thread %s"
+ % (me, self._writer))
+ self._writer = None
+ else:
+ if me not in self._readers:
+ raise RuntimeError("Thread %s attempted to release a "
+ "read lock it does not hold"
+ % (me,))
+ self._readers.remove(me)
+ if self._waiters:
+ self._wakeup_waiter()
+
+ def _wait(self, wants_write):
+ waiter = Waiter(wants_write)
+ self._waiters.append(waiter)
+ try:
+ self._lock.release()
+ try:
+ waiter.wait()
+ finally:
+ self._lock.acquire()
+ finally:
+ self._waiters.remove(waiter)
+
+ def _wakeup_waiter(self):
+ if self._readers and self._waiters[0].wants_write:
+ return
+ self._waiters[0].wakeup()
+
+
+class Waiter(object):
+
+ def __init__(self, wants_write):
+ self.wants_write = wants_write
+ self._event = threading.Event()
+
+ def wait(self):
+ self._event.wait()
+
+ def wakeup(self):
+ self._event.set()
diff --git a/tests/rwlock_test.py b/tests/rwlock_test.py
index 80d04f1..e67e27a 100644
--- a/tests/rwlock_test.py
+++ b/tests/rwlock_test.py
@@ -25,7 +25,7 @@
from testlib import VdsmTestCase
from testValidation import slowtest, stresstest
-from storage.misc import RWLock
+from vdsm.rwlock import RWLock
class RWLockTests(VdsmTestCase):
diff --git a/vdsm.spec.in b/vdsm.spec.in
index 2b41835..35a9670 100644
--- a/vdsm.spec.in
+++ b/vdsm.spec.in
@@ -1263,6 +1263,7 @@
%{python_sitelib}/%{vdsm_name}/pthread.py*
%{python_sitelib}/%{vdsm_name}/qemuimg.py*
%{python_sitelib}/%{vdsm_name}/response.py*
+%{python_sitelib}/%{vdsm_name}/rwlock.py*
%{python_sitelib}/%{vdsm_name}/netconfpersistence.py*
%{python_sitelib}/%{vdsm_name}/schedule.py*
%{python_sitelib}/%{vdsm_name}/sslutils.py*
--
To view, visit https://gerrit.ovirt.org/42908
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I2466c137c89598772fb46347eb02195916883cac
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Nir Soffer <nsoffer(a)redhat.com>
Nir Soffer has uploaded a new change for review.
Change subject: lvm: Exclude faulty devices from lvm long filter
......................................................................
lvm: Exclude faulty devices from lvm long filter
lvm commands use filter to limit access to relevant devices. When the
filter includes a faulty device, lvm commands may block for several
minutes (stuck in D state). We have seen getDevicesList command stuck
for up to 10 minutes because of faulty devices in the long filter.
We used to build the filter from all multipath devices. Now we build the
filter only from devices which have at least one active path.
# multipath -ll
360060160f4a0300038ed7058b5e9e311 dm-0 DGC ,VRAID
size=15G features='0' hwhandler='1 emc' wp=rw
|-+- policy='service-time 0' prio=0 status=enabled
| `- 4:0:3:0 sdd 8:48 failed faulty running
`-+- policy='service-time 0' prio=0 status=enabled
`- 4:0:2:0 sdb 8:16 failed faulty running
360060160f4a030003268ab211002e411 dm-1 DGC ,VRAID
size=30G features='1 queue_if_no_path' hwhandler='1 emc' wp=rw
|-+- policy='service-time 0' prio=4 status=active
| `- 4:0:3:1 sde 8:64 active ready running
`-+- policy='service-time 0' prio=1 status=enabled
`- 4:0:2:1 sdc 8:32 active ready running
Previously, both devices were included in the filter, now only
360060160f4a030003268ab211002e411 will be included in lvm filter.
A faulty device which became active again will be included in the lvm
filter after the next refresh (every 5 minutes), or after trying to edit
or create a new storage domain.
lvm also uses a short filter, including devices used by a certain vg or
lv. It is possible that we also have to exclude such devices from the
short filter. This will be handled later if needed.
Change-Id: I6d7a973bcefa95813fdc289847760c0955aca30c
Bug-Url: https://bugzilla.redhat.com/880738
Signed-off-by: Nir Soffer <nsoffer(a)redhat.com>
---
M vdsm/storage/lvm.py
M vdsm/storage/multipath.py
2 files changed, 13 insertions(+), 1 deletion(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/75/31875/1
diff --git a/vdsm/storage/lvm.py b/vdsm/storage/lvm.py
index 86edf55..9cfad01 100644
--- a/vdsm/storage/lvm.py
+++ b/vdsm/storage/lvm.py
@@ -244,7 +244,7 @@
if not self._filterStale:
return self._extraCfg
- self._extraCfg = _buildConfig(multipath.getMPDevNamesIter())
+ self._extraCfg = _buildConfig(multipath.getActiveMPDevNamesIter())
_updateLvmConf(self._extraCfg)
self._filterStale = False
diff --git a/vdsm/storage/multipath.py b/vdsm/storage/multipath.py
index ba98866..2b30995 100644
--- a/vdsm/storage/multipath.py
+++ b/vdsm/storage/multipath.py
@@ -382,6 +382,18 @@
yield os.path.join(devicemapper.DMPATH_PREFIX, name)
+def getActiveMPDevNamesIter():
+ status = devicemapper.getPathsStatus()
+ for dmId, guid in getMPDevsIter():
+ active = [slave for slave in devicemapper.getSlaves(dmId)
+ if status.get(slave) == "active"]
+ if not active:
+ log.warning("Skipping device %s - no active slave", guid)
+ continue
+ log.debug("Found device %s %s", guid, active)
+ yield os.path.join(devicemapper.DMPATH_PREFIX, guid)
+
+
def getMPDevsIter():
"""
Collect the list of all the multipath block devices.
--
To view, visit http://gerrit.ovirt.org/31875
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I6d7a973bcefa95813fdc289847760c0955aca30c
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Nir Soffer <nsoffer(a)redhat.com>
Federico Simoncelli has uploaded a new change for review.
Change subject: lib: add support for utillinux commands
......................................................................
lib: add support for utillinux commands
Change-Id: I2ea7dd19fadc600b8fe78fb436ae430d35f52165
Signed-off-by: Federico Simoncelli <fsimonce(a)redhat.com>
---
M lib/vdsm/Makefile.am
A lib/vdsm/utillinux.py
M tests/Makefile.am
A tests/utillinuxTests.py
M vdsm.spec.in
5 files changed, 132 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/29/35629/1
diff --git a/lib/vdsm/Makefile.am b/lib/vdsm/Makefile.am
index b862e71..4e0868a 100644
--- a/lib/vdsm/Makefile.am
+++ b/lib/vdsm/Makefile.am
@@ -33,6 +33,7 @@
profile.py \
qemuimg.py \
sslutils.py \
+ utillinux.py \
utils.py \
vdscli.py \
virtsparsify.py \
diff --git a/lib/vdsm/utillinux.py b/lib/vdsm/utillinux.py
new file mode 100644
index 0000000..7fb42cf
--- /dev/null
+++ b/lib/vdsm/utillinux.py
@@ -0,0 +1,47 @@
+#
+# Copyright 2014 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Refer to the README and COPYING files for full details of the license
+#
+
+import os
+import re
+import signal
+
+from . import utils
+
+_blkdiscard = utils.CommandPath("blkdiscard",
+ "/sbin/blkdiscard",) # Fedora, EL6
+
+
+def blkdiscard(device, offset=None, length=None, secure=False):
+ cmd = [_blkdiscard.cmd]
+
+ if offset:
+ cmd.extend(("-o", str(offset)))
+
+ if length:
+ cmd.extend(("-l", str(length)))
+
+ if secure:
+ cmd.append("-s")
+
+ cmd.append(device)
+ rc, out, err = utils.execCmd(cmd, deathSignal=signal.SIGKILL)
+
+ if rc != 0:
+ raise QImgError(rc, out, err)
diff --git a/tests/Makefile.am b/tests/Makefile.am
index b365921..5aa9fc3 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -79,6 +79,7 @@
testlibTests.py \
toolTests.py \
transportWrapperTests.py \
+ utillinuxTests.py \
utilsTests.py \
vdsClientTests.py \
vmApiTests.py \
diff --git a/tests/utillinuxTests.py b/tests/utillinuxTests.py
new file mode 100644
index 0000000..dbbb50e
--- /dev/null
+++ b/tests/utillinuxTests.py
@@ -0,0 +1,82 @@
+#
+# Copyright 2014 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Refer to the README and COPYING files for full details of the license
+#
+
+from testlib import VdsmTestCase as TestCaseBase
+import monkeypatch
+from vdsm import utillinux
+from vdsm import utils
+
+BLKDISCARD = utillinux._blkdiscard.cmd
+
+
+class FakeCmd(object):
+
+ def __init__(self, module, name, *calls):
+ self.patch = monkeypatch.Patch([(module, name, self)])
+ self.calls = list(calls)
+
+ def __call__(self, cmd, **kw):
+ call = self.calls.pop(0)
+ return call(cmd, **kw)
+
+ def __enter__(self):
+ self.patch.apply()
+
+ def __exit__(self, t=None, v=None, tb=None):
+ self.patch.revert()
+
+
+class BlkDiscardTests(TestCaseBase):
+
+ def test_no_options(self):
+ def discard(cmd, **kw):
+ expected = [BLKDISCARD, 'device']
+ self.assertEqual(cmd, expected)
+ return 0, '', ''
+
+ with FakeCmd(utils, 'execCmd', discard):
+ utillinux.blkdiscard('device')
+
+ def test_offset(self):
+ def discard(cmd, **kw):
+ expected = [BLKDISCARD, '-o', '1024', 'device']
+ self.assertEqual(cmd, expected)
+ return 0, '', ''
+
+ with FakeCmd(utils, 'execCmd', discard):
+ utillinux.blkdiscard('device', offset=1024)
+
+ def test_length(self):
+ def discard(cmd, **kw):
+ expected = [BLKDISCARD, '-l', '2048', 'device']
+ self.assertEqual(cmd, expected)
+ return 0, '', ''
+
+ with FakeCmd(utils, 'execCmd', discard):
+ utillinux.blkdiscard('device', length=2048)
+
+ def test_offset_and_length(self):
+ def discard(cmd, **kw):
+ expected = [BLKDISCARD, '-o', '1024', '-l', '2048', 'device']
+ self.assertEqual(cmd, expected)
+ return 0, '', ''
+
+ with FakeCmd(utils, 'execCmd', discard):
+ utillinux.blkdiscard('device', offset=1024, length=2048)
diff --git a/vdsm.spec.in b/vdsm.spec.in
index d15f5c2..befe0e0 100644
--- a/vdsm.spec.in
+++ b/vdsm.spec.in
@@ -1252,6 +1252,7 @@
%{python_sitelib}/%{vdsm_name}/qemuimg.py*
%{python_sitelib}/%{vdsm_name}/netconfpersistence.py*
%{python_sitelib}/%{vdsm_name}/sslutils.py*
+%{python_sitelib}/%{vdsm_name}/utillinux.py*
%{python_sitelib}/%{vdsm_name}/utils.py*
%{python_sitelib}/%{vdsm_name}/vdscli.py*
%{python_sitelib}/%{vdsm_name}/virtsparsify.py*
--
To view, visit http://gerrit.ovirt.org/35629
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I2ea7dd19fadc600b8fe78fb436ae430d35f52165
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Federico Simoncelli <fsimonce(a)redhat.com>
Nir Soffer has uploaded a new change for review.
Change subject: lvm: Decrease number of retries before failing
......................................................................
lvm: Decrease number of retries before failing
Previously we configured lvm to stop accessing a device during an lvm
operation after 3 errors. This can cause lvm to block 3 times when
trying to access an inaccessible device. With current iscsi settings, each
block can be 120 seconds, total 360 seconds. We have seen lvm block for
a couple of minutes in such cases.
The retries seem unneeded when working with multiple paths, as multipath
already retries all available paths after SCSI errors on one path. However,
when working with a single path, multipath should fail after one try.
According to an lvm developer, this may decrease the time lvm is blocked
when devices are not accessible.
(Not tested yet)
Change-Id: I5d11abaaff45ce86e88c6589264e162318ac1f1d
Relates-To: https://bugzilla.redhat.com/880738
Signed-off-by: Nir Soffer <nsoffer(a)redhat.com>
---
M vdsm/storage/lvm.py
1 file changed, 1 insertion(+), 1 deletion(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/56/32356/1
diff --git a/vdsm/storage/lvm.py b/vdsm/storage/lvm.py
index 86edf55..f760f7b 100644
--- a/vdsm/storage/lvm.py
+++ b/vdsm/storage/lvm.py
@@ -106,7 +106,7 @@
preferred_names = ["^/dev/mapper/"]
ignore_suspended_devices=1
write_cache_state=0
-disable_after_error_count=3
+disable_after_error_count=1
obtain_device_list_from_udev=0
%s
}
--
To view, visit http://gerrit.ovirt.org/32356
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I5d11abaaff45ce86e88c6589264e162318ac1f1d
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Nir Soffer <nsoffer(a)redhat.com>
Nir Soffer has uploaded a new change for review.
Change subject: clusterlock: Remove unneeded workaround
......................................................................
clusterlock: Remove unneeded workaround
Sanlock version XXXX had an off-by-one bug when calling get_hosts with a
host id, returning info for the next host. This bug is fixed in version
XXX. Now we can use the hostId parameter, making the call more efficient
and simplifying clusterlock code.
Change-Id: Ide75e749fbc2916540c2b526b78fedc247b5c6f9
Signed-off-by: Nir Soffer <nsoffer(a)redhat.com>
---
M vdsm.spec.in
M vdsm/storage/clusterlock.py
2 files changed, 5 insertions(+), 16 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/62/31162/1
diff --git a/vdsm.spec.in b/vdsm.spec.in
index 5ba6fc6..fc39eb9 100644
--- a/vdsm.spec.in
+++ b/vdsm.spec.in
@@ -167,7 +167,7 @@
Requires: iscsi-initiator-utils >= 6.2.0.873-21
%endif
-Requires: sanlock >= 2.8, sanlock-python
+Requires: sanlock >= XXX, sanlock-python
%if 0%{?rhel}
Requires: python-ethtool >= 0.6-3
diff --git a/vdsm/storage/clusterlock.py b/vdsm/storage/clusterlock.py
index 24a5d81..17cdd53 100644
--- a/vdsm/storage/clusterlock.py
+++ b/vdsm/storage/clusterlock.py
@@ -265,26 +265,15 @@
return False
def getHostStatus(self, hostId):
- # Note: get_hosts has off-by-one bug when asking for particular host
- # id, so get all hosts info and filter.
- # See https://bugzilla.redhat.com/1111210
try:
- hosts = sanlock.get_hosts(self._sdUUID)
+ hosts = sanlock.get_hosts(self._sdUUID, hostId)
except sanlock.SanlockException as e:
self.log.debug("Unable to get host %d status in lockspace %s: %s",
hostId, self._sdUUID, e)
return HOST_STATUS_UNAVAILABLE
-
- for info in hosts:
- if info['host_id'] == hostId:
- status = info['flags']
- return self.STATUS_NAME[status]
-
- # get_hosts with host_id=0 returns only hosts with timestamp != 0,
- # which means that no host is using this host id now. If there a
- # timestamp, sanlock will return HOST_UNKNOWN and then HOST_LIVE or
- # HOST_FAIL.
- return HOST_STATUS_FREE
+ else:
+ status = hosts[0]['flags']
+ return self.STATUS_NAME[status]
# The hostId parameter is maintained here only for compatibility with
# ClusterLock. We could consider to remove it in the future but keeping it
--
To view, visit http://gerrit.ovirt.org/31162
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ide75e749fbc2916540c2b526b78fedc247b5c6f9
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Nir Soffer <nsoffer(a)redhat.com>