Change in vdsm[ovirt-3.5]: vm: avoid race while setting guest cpu status
by fromani@redhat.com
Hello Dan Kenigsberg, Michal Skrivanek,
I'd like you to do a code review. Please visit
http://gerrit.ovirt.org/32462
to review the following change.
Change subject: vm: avoid race while setting guest cpu status
......................................................................
vm: avoid race while setting guest cpu status
Inside the Vm class, the state swapping is not atomic.
This is because the reported state is a function of two internal
fields, lastStatus and guestCpuRunning, which may be updated
concurrently rather than atomically.

One clear example of this is BZ1111938, in which we see a race
between pause(), _onLibvirtLifecycleEvent() and getStats() that
causes a Vm to be incorrectly reported as 'Paused' (instead of
'Saving State'), which confuses Engine.

To fix this, we take the guestCpuLock everywhere we change the
state of the guest CPU, in order to synchronize access to the
field with respect to the pause() and cont() methods.

While good enough for the short term, more aggressive refactoring
is needed in this area.
Change-Id: I3aea96c7122d60e6cb888273678b565c3f3e537f
Bug-Url: https://bugzilla.redhat.com/1111938
Signed-off-by: Francesco Romani <fromani@redhat.com>
Reviewed-on: http://gerrit.ovirt.org/29113
Reviewed-by: Michal Skrivanek <michal.skrivanek@redhat.com>
Reviewed-by: Dan Kenigsberg <danken@redhat.com>
---
M tests/vmTests.py
M vdsm/virt/vm.py
2 files changed, 67 insertions(+), 12 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/62/32462/1
diff --git a/tests/vmTests.py b/tests/vmTests.py
index 1a0a751..aa73a6e 100644
--- a/tests/vmTests.py
+++ b/tests/vmTests.py
@@ -24,12 +24,15 @@
import re
import shutil
import tempfile
+import threading
+import time
import xml.etree.ElementTree as ET
import libvirt
from virt import vm
from virt import vmexitreason
+from virt import vmstatus
from vdsm import constants
from vdsm import define
from testrunner import VdsmTestCase as TestCaseBase
@@ -42,6 +45,8 @@
from vmTestsData import CONF_TO_DOMXML_PPC64
from vmTestsData import CONF_TO_DOMXML_NO_VDSM
+from testValidation import slowtest
+
class ConnectionMock:
def __init__(self, *args):
@@ -52,6 +57,12 @@
def listAllNetworks(self, *args):
return []
+
+
+class FakeClientIF(object):
+ def __init__(self, *args, **kwargs):
+ self.channelListener = None
+ self.vmContainer = {}
class FakeDomain(object):
@@ -902,17 +913,21 @@
@contextmanager
def FakeVM(params=None, devices=None, runCpu=False,
- arch=caps.Architecture.X86_64):
+ arch=caps.Architecture.X86_64, status=None):
with namedTemporaryDir() as tmpDir:
with MonkeyPatchScope([(constants, 'P_VDSM_RUN', tmpDir + '/'),
(libvirtconnection, 'get', ConnectionMock)]):
vmParams = {'vmId': 'TESTING'}
vmParams.update({} if params is None else params)
- fake = vm.Vm(None, vmParams)
+ cif = FakeClientIF()
+ fake = vm.Vm(cif, vmParams)
+ cif.vmContainer[fake.id] = fake
fake.arch = arch
fake.guestAgent = FakeGuestAgent()
fake.conf['devices'] = [] if devices is None else devices
fake._guestCpuRunning = runCpu
+ if status is not None:
+ fake._lastStatus = status
yield fake
@@ -1311,3 +1326,32 @@
with MonkeyPatchScope([(vm, '_listDomains',
lambda: self._getAllDomains('novdsm'))]):
self.assertFalse(vm.getVDSMDomains())
+
+
+class TestVmStatusTransitions(TestCaseBase):
+ @slowtest
+ def testSavingState(self):
+ with FakeVM(runCpu=True, status=vmstatus.UP) as vm:
+ vm._dom = FakeDomain(domState=libvirt.VIR_DOMAIN_RUNNING)
+
+ def _asyncEvent():
+ vm._onLibvirtLifecycleEvent(
+ libvirt.VIR_DOMAIN_EVENT_SUSPENDED,
+ -1, -1)
+
+ t = threading.Thread(target=_asyncEvent)
+ t.daemon = True
+
+ def _fireAsyncEvent(*args):
+ t.start()
+ time.sleep(0.5)
+ # pause the main thread to let the event one run
+
+ with MonkeyPatchScope([(vm, '_underlyingPause', _fireAsyncEvent)]):
+ self.assertEqual(vm.status()['status'], vmstatus.UP)
+ vm.pause(vmstatus.SAVING_STATE)
+ self.assertEqual(vm.status()['status'], vmstatus.SAVING_STATE)
+ t.join()
+ self.assertEqual(vm.status()['status'], vmstatus.SAVING_STATE)
+ # state must not change even after we are sure the event was
+ # handled
diff --git a/vdsm/virt/vm.py b/vdsm/virt/vm.py
index 9e4437a..91b7400 100644
--- a/vdsm/virt/vm.py
+++ b/vdsm/virt/vm.py
@@ -2722,8 +2722,8 @@
self.log.error('cannot cont while %s', self.lastStatus)
return errCode['unexpected']
self._underlyingCont()
- if hasattr(self, 'updateGuestCpuRunning'):
- self.updateGuestCpuRunning()
+ self._setGuestCpuRunning(self._isDomainRunning(),
+ guestCpuLocked=True)
self._lastStatus = afterState
try:
with self._confLock:
@@ -2743,10 +2743,24 @@
with self._confLock:
self.conf['pauseCode'] = pauseCode
self._underlyingPause()
- if hasattr(self, 'updateGuestCpuRunning'):
- self.updateGuestCpuRunning()
+ self._setGuestCpuRunning(self._isDomainRunning(),
+ guestCpuLocked=True)
self._lastStatus = afterState
return {'status': doneCode, 'output': ['']}
+ finally:
+ if not guestCpuLocked:
+ self._guestCpuLock.release()
+
+ def _setGuestCpuRunning(self, isRunning, guestCpuLocked=False):
+ """
+ here we want to synchronize the access to guestCpuRunning
+ made by callback with the pause/cont methods.
+ To do so we reuse guestCpuLocked.
+ """
+ if not guestCpuLocked:
+ self._acquireCpuLockWithTimeout()
+ try:
+ self._guestCpuRunning = isRunning
finally:
if not guestCpuLocked:
self._guestCpuLock.release()
@@ -3173,9 +3187,6 @@
return False
else:
return status[0] == libvirt.VIR_DOMAIN_RUNNING
-
- def updateGuestCpuRunning(self):
- self._guestCpuRunning = self._isDomainRunning()
def _getUnderlyingVmDevicesInfo(self):
"""
@@ -4778,7 +4789,7 @@
self.log.info('abnormal vm stop device %s error %s',
blockDevAlias, err)
self.conf['pauseCode'] = err.upper()
- self._guestCpuRunning = False
+ self._setGuestCpuRunning(False)
if err.upper() == 'ENOSPC':
if not self.extendDrivesIfNeeded():
self.log.info("No VM drives were extended")
@@ -5475,7 +5486,7 @@
self._shutdownReason = vmexitreason.USER_SHUTDOWN
self._onQemuDeath()
elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
- self._guestCpuRunning = False
+ self._setGuestCpuRunning(False)
if detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_PAUSED:
# Libvirt sometimes send the SUSPENDED/SUSPENDED_PAUSED event
# after RESUMED/RESUMED_MIGRATED (when VM status is PAUSED
@@ -5490,7 +5501,7 @@
hooks.after_vm_pause(domxml, self.conf)
elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
- self._guestCpuRunning = True
+ self._setGuestCpuRunning(True)
if detail == libvirt.VIR_DOMAIN_EVENT_RESUMED_UNPAUSED:
# This is not a real solution however the safest way to handle
# this for now. Ultimately we need to change the way how we are
--
To view, visit http://gerrit.ovirt.org/32462
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I3aea96c7122d60e6cb888273678b565c3f3e537f
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: ovirt-3.5
Gerrit-Owner: Francesco Romani <fromani@redhat.com>
Gerrit-Reviewer: Dan Kenigsberg <danken@redhat.com>
Gerrit-Reviewer: Michal Skrivanek <michal.skrivanek@redhat.com>
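
The core of the fix is the new _setGuestCpuRunning() helper in the
diff above. A minimal, self-contained sketch of the locking pattern it
implements (the names mirror the patch, but the lock-with-timeout
wrapper is simplified here; vdsm's real _acquireCpuLockWithTimeout is
defined elsewhere in vm.py):

import threading

class GuestCpuState(object):
    def __init__(self):
        self._guestCpuRunning = True
        self._guestCpuLock = threading.Lock()

    def _acquireCpuLockWithTimeout(self):
        # Simplification: block unconditionally. The real helper gives
        # up after a timeout so a stuck pause()/cont() cannot wedge
        # every caller behind it.
        self._guestCpuLock.acquire()

    def _setGuestCpuRunning(self, isRunning, guestCpuLocked=False):
        # pause() and cont() already hold the lock and pass
        # guestCpuLocked=True; the asynchronous libvirt lifecycle
        # callback does not, so it takes the lock here before touching
        # the shared field.
        if not guestCpuLocked:
            self._acquireCpuLockWithTimeout()
        try:
            self._guestCpuRunning = isRunning
        finally:
            if not guestCpuLocked:
                self._guestCpuLock.release()

Serializing every write through the lock closes the window in which
the event callback could flip _guestCpuRunning while pause() had not
yet moved lastStatus to 'Saving State'.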
Change in vdsm[ovirt-3.5]: storage domain detach with force fails
by Jenkins CI RO
oVirt Jenkins CI Server has posted comments on this change.
Change subject: storage domain detach with force fails
......................................................................
Patch Set 3:
Build Successful
http://jenkins.ovirt.org/job/vdsm_3.5_create-rpms_merged/147/ : SUCCESS
--
To view, visit http://gerrit.ovirt.org/32503
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: comment
Gerrit-Change-Id: I17058f86448c0019520ea9315da5524c5b3f2b6c
Gerrit-PatchSet: 3
Gerrit-Project: vdsm
Gerrit-Branch: ovirt-3.5
Gerrit-Owner: Piotr Kliczewski <piotr.kliczewski@gmail.com>
Gerrit-Reviewer: Allon Mureinik <amureini@redhat.com>
Gerrit-Reviewer: Dan Kenigsberg <danken@redhat.com>
Gerrit-Reviewer: Maor Lipchuk <mlipchuk@redhat.com>
Gerrit-Reviewer: Piotr Kliczewski <piotr.kliczewski@gmail.com>
Gerrit-Reviewer: Saggi Mizrahi <smizrahi@redhat.com>
Gerrit-Reviewer: Yaniv Bronhaim <ybronhei@redhat.com>
Gerrit-Reviewer: automation@ovirt.org
Gerrit-Reviewer: oVirt Jenkins CI Server
Gerrit-HasComments: No
Change in vdsm[ovirt-3.5]: storage domain detach with force fails
by Dan Kenigsberg
Dan Kenigsberg has submitted this change and it was merged.
Change subject: storage domain detach with force fails
......................................................................
storage domain detach with force fails
In the jsonrpc schema, masterSdUUID and masterVersion are marked as optional.
We need to provide default values for both arguments.
Bug-Url: https://bugzilla.redhat.com/1110831
Change-Id: I17058f86448c0019520ea9315da5524c5b3f2b6c
Signed-off-by: pkliczewski <piotr.kliczewski@gmail.com>
Reviewed-on: http://gerrit.ovirt.org/32503
Reviewed-by: Dan Kenigsberg <danken@redhat.com>
---
M vdsm/API.py
1 file changed, 1 insertion(+), 1 deletion(-)
Approvals:
Piotr Kliczewski: Verified
Dan Kenigsberg: Looks good to me, approved
--
To view, visit http://gerrit.ovirt.org/32503
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: merged
Gerrit-Change-Id: I17058f86448c0019520ea9315da5524c5b3f2b6c
Gerrit-PatchSet: 3
Gerrit-Project: vdsm
Gerrit-Branch: ovirt-3.5
Gerrit-Owner: Piotr Kliczewski <piotr.kliczewski@gmail.com>
Gerrit-Reviewer: Allon Mureinik <amureini@redhat.com>
Gerrit-Reviewer: Dan Kenigsberg <danken@redhat.com>
Gerrit-Reviewer: Maor Lipchuk <mlipchuk@redhat.com>
Gerrit-Reviewer: Piotr Kliczewski <piotr.kliczewski@gmail.com>
Gerrit-Reviewer: Saggi Mizrahi <smizrahi@redhat.com>
Gerrit-Reviewer: Yaniv Bronhaim <ybronhei@redhat.com>
Gerrit-Reviewer: automation@ovirt.org
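
The fix itself is a one-line signature change in vdsm/API.py: because
the jsonrpc schema marks the two arguments optional, the Python
signature must carry defaults so a request that omits them does not
fail during dispatch. A hypothetical sketch of the pattern (the method
name and the default values are illustrative assumptions; only the
parameter names come from the commit message):

# Before: a jsonrpc call that omits the optional fields raises
# TypeError at dispatch time, before any storage logic runs.
def detach(self, sdUUID, spUUID, masterSdUUID, masterVersion):
    pass

# After: defaults make the schema-optional fields optional in Python
# too; the sentinels stand in for "no master domain specified".
def detach(self, sdUUID, spUUID, masterSdUUID=None, masterVersion=0):
    pass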
Change in vdsm[ovirt-3.5]: storage domain detach with force fails
by Dan Kenigsberg
Dan Kenigsberg has posted comments on this change.
Change subject: storage domain detach with force fails
......................................................................
Patch Set 2: Code-Review+2
--
To view, visit http://gerrit.ovirt.org/32503
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: comment
Gerrit-Change-Id: I17058f86448c0019520ea9315da5524c5b3f2b6c
Gerrit-PatchSet: 2
Gerrit-Project: vdsm
Gerrit-Branch: ovirt-3.5
Gerrit-Owner: Piotr Kliczewski <piotr.kliczewski@gmail.com>
Gerrit-Reviewer: Allon Mureinik <amureini@redhat.com>
Gerrit-Reviewer: Dan Kenigsberg <danken@redhat.com>
Gerrit-Reviewer: Maor Lipchuk <mlipchuk@redhat.com>
Gerrit-Reviewer: Piotr Kliczewski <piotr.kliczewski@gmail.com>
Gerrit-Reviewer: Saggi Mizrahi <smizrahi@redhat.com>
Gerrit-Reviewer: Yaniv Bronhaim <ybronhei@redhat.com>
Gerrit-Reviewer: automation@ovirt.org
Gerrit-HasComments: No
Change in vdsm[master]: storage domain detach with force fails
by Dan Kenigsberg
Dan Kenigsberg has submitted this change and it was merged.
Change subject: storage domain detach with force fails
......................................................................
storage domain detach with force fails
In the jsonrpc schema, masterSdUUID and masterVersion are marked as optional.
We need to provide default values for both arguments.
Bug-Url: https://bugzilla.redhat.com/1110831
Change-Id: I17058f86448c0019520ea9315da5524c5b3f2b6c
Signed-off-by: pkliczewski <piotr.kliczewski@gmail.com>
Reviewed-on: http://gerrit.ovirt.org/32457
Reviewed-by: Allon Mureinik <amureini@redhat.com>
Tested-by: Maor Lipchuk <mlipchuk@redhat.com>
Reviewed-by: Dan Kenigsberg <danken@redhat.com>
---
M vdsm/API.py
1 file changed, 1 insertion(+), 1 deletion(-)
Approvals:
Piotr Kliczewski: Verified
Allon Mureinik: Looks good to me, but someone else must approve
Dan Kenigsberg: Looks good to me, approved
Maor Lipchuk: Verified
--
To view, visit http://gerrit.ovirt.org/32457
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: merged
Gerrit-Change-Id: I17058f86448c0019520ea9315da5524c5b3f2b6c
Gerrit-PatchSet: 3
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Piotr Kliczewski <piotr.kliczewski@gmail.com>
Gerrit-Reviewer: Allon Mureinik <amureini@redhat.com>
Gerrit-Reviewer: Dan Kenigsberg <danken@redhat.com>
Gerrit-Reviewer: Maor Lipchuk <mlipchuk@redhat.com>
Gerrit-Reviewer: Piotr Kliczewski <piotr.kliczewski@gmail.com>
Gerrit-Reviewer: Saggi Mizrahi <smizrahi@redhat.com>
Gerrit-Reviewer: Yaniv Bronhaim <ybronhei@redhat.com>
Gerrit-Reviewer: automation@ovirt.org
Gerrit-Reviewer: oVirt Jenkins CI Server
Change in vdsm[master]: storage domain detach with force fails
by Dan Kenigsberg
Dan Kenigsberg has posted comments on this change.
Change subject: storage domain detach with force fails
......................................................................
Patch Set 2: Code-Review+2
--
To view, visit http://gerrit.ovirt.org/32457
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: comment
Gerrit-Change-Id: I17058f86448c0019520ea9315da5524c5b3f2b6c
Gerrit-PatchSet: 2
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Piotr Kliczewski <piotr.kliczewski@gmail.com>
Gerrit-Reviewer: Allon Mureinik <amureini@redhat.com>
Gerrit-Reviewer: Dan Kenigsberg <danken@redhat.com>
Gerrit-Reviewer: Maor Lipchuk <mlipchuk@redhat.com>
Gerrit-Reviewer: Piotr Kliczewski <piotr.kliczewski@gmail.com>
Gerrit-Reviewer: Saggi Mizrahi <smizrahi@redhat.com>
Gerrit-Reviewer: Yaniv Bronhaim <ybronhei@redhat.com>
Gerrit-Reviewer: automation@ovirt.org
Gerrit-Reviewer: oVirt Jenkins CI Server
Gerrit-HasComments: No
Change in vdsm[master]: storage domain detach with force fails
by Maor Lipchuk
Maor Lipchuk has posted comments on this change.
Change subject: storage domain detach with force fails
......................................................................
Patch Set 2: Verified+1
--
To view, visit http://gerrit.ovirt.org/32457
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: comment
Gerrit-Change-Id: I17058f86448c0019520ea9315da5524c5b3f2b6c
Gerrit-PatchSet: 2
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Piotr Kliczewski <piotr.kliczewski@gmail.com>
Gerrit-Reviewer: Allon Mureinik <amureini@redhat.com>
Gerrit-Reviewer: Dan Kenigsberg <danken@redhat.com>
Gerrit-Reviewer: Maor Lipchuk <mlipchuk@redhat.com>
Gerrit-Reviewer: Piotr Kliczewski <piotr.kliczewski@gmail.com>
Gerrit-Reviewer: Saggi Mizrahi <smizrahi@redhat.com>
Gerrit-Reviewer: Yaniv Bronhaim <ybronhei@redhat.com>
Gerrit-Reviewer: automation@ovirt.org
Gerrit-Reviewer: oVirt Jenkins CI Server
Gerrit-HasComments: No
Change in vdsm[master]: iscsi and localfs functional tests integrated
by ykleinbe@redhat.com
Yoav Kleinberger has uploaded a new change for review.
Change subject: iscsi and localfs functional tests integrated
......................................................................
iscsi and localfs functional tests integrated
Change-Id: I483d57fbab9e3a804b99bb8353f3885ca72ecf38
Signed-off-by: Yoav Kleinberger <ykleinbe@redhat.com>
---
M tests/functional/new/basic_storage_domain_test.py
M tests/functional/new/testlib/testcontexts/base.py
M tests/functional/new/testlib/testcontexts/iscsi.py
M tests/functional/new/testlib/testcontexts/localfs.py
4 files changed, 118 insertions(+), 136 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/73/32473/1
diff --git a/tests/functional/new/basic_storage_domain_test.py b/tests/functional/new/basic_storage_domain_test.py
index b50f2a9..573f20c 100644
--- a/tests/functional/new/basic_storage_domain_test.py
+++ b/tests/functional/new/basic_storage_domain_test.py
@@ -1,10 +1,9 @@
-import sys
-import uuid
import storage.volume
import storage.image
import logging
-from testlib import vdsmtestcontext
from testlib import controlvdsm
+from testlib.testcontexts import localfs
+from testlib.testcontexts import iscsi
class TestBasicStorageDomain:
@classmethod
@@ -15,8 +14,14 @@
controlVDSM = controlvdsm.ControlVDSM()
controlVDSM.cleanup()
- def test_create_volume(self):
- with vdsmtestcontext.vdsmTestContext('iscsi') as (vdsm, verify):
+ def testCreateVolumeLocalFS(self):
+ self._testCreateVolume(localfs.LocalFS)
+
+ def testCreateVolumeISCSI(self):
+ self._testCreateVolume(iscsi.ISCSI)
+
+ def _testCreateVolume(self, storageContext):
+ with storageContext() as (vdsm, verify):
storageServerID = vdsm.connectStorageServer()
verify.storageServerConnected()
@@ -26,12 +31,12 @@
poolID = vdsm.createStoragePool()
verify.storagePoolCreated(poolID, masterDomainID = domainID)
- vdsm.connectStoragePool()
- vdsm.spmStart()
+ vdsm.connectStoragePool(poolID, masterDomainID = domainID)
+ vdsm.spmStart(poolID)
verify.spmStarted(poolID)
vdsm.activateStorageDomain(domainID, poolID)
GIGABYTE = 1024 ** 3
- taskID = vdsm.createVolume(1 * GIGABYTE)
- verify.volumeCreated(taskID)
+ volumeInfo = vdsm.createVolume(1 * GIGABYTE)
+ verify.volumeCreated(volumeInfo)
vdsm.disconnectStorageServer()
diff --git a/tests/functional/new/testlib/testcontexts/base.py b/tests/functional/new/testlib/testcontexts/base.py
index 53c5ff8..6cf67c2 100644
--- a/tests/functional/new/testlib/testcontexts/base.py
+++ b/tests/functional/new/testlib/testcontexts/base.py
@@ -1,13 +1,14 @@
import os
+import vdsm.vdscli
+import vdsm.config
+import random
+import uuid
import logging
import time
class Verify:
-#####
- def embed(self,msg):
- print 'embedding: %s' % msg
- import IPython
- IPython.embed()
+ def __init__(self, vdsm):
+ self._vdsm = vdsm
def assertPathExists(self, path, link = False):
logging.info('verifying path: %s' % path)
@@ -22,8 +23,8 @@
else:
assert not os.path.exists(path)
- def waitForVDSMToFinishTask(self):
- time.sleep(1)
+ def waitForVDSMToFinishTask(self, duration = 1):
+ time.sleep(duration)
def storagePoolCreated(self, poolID, masterDomainID):
self.waitForVDSMToFinishTask()
@@ -60,3 +61,48 @@
return False
return True
+
+ def waitOnVDSMTask(self, taskID, timeout):
+ taskFinished = lambda: (self._taskStatus(taskID)['taskState'] == 'finished')
+ self.waitFor(taskFinished, 'vdsm task to be finished', timeout)
+ taskStatus = self._taskStatus(taskID)
+ assert taskStatus['code'] == 0, taskStatus['message']
+
+ def _taskStatus(self, taskID):
+ result = self._vdsm.getTaskStatus(taskID)
+ assert result['status']['code'] == 0
+ taskStatus = result['taskStatus']
+ return taskStatus
+
+class StorageBackend:
+ def __init__(self):
+ useSSL = vdsm.config.config.getboolean('vars', 'ssl')
+ self._vdsm = vdsm.vdscli.connect(useSSL=useSSL)
+
+ def newUUID(self):
+ return str(uuid.uuid4())
+
+ def randomName(self, base):
+ return "%s_%04d" % (base, random.randint(1,10000))
+
+ def connectStoragePool(self, poolID, masterDomainID):
+ SCSI_KEY_DEPRECATED = 0
+ result = self._vdsm.connectStoragePool(poolID, 1, SCSI_KEY_DEPRECATED, masterDomainID, 1)
+ self.verifyVDSMSuccess(result)
+
+ def spmStart(self, poolID):
+ RECOVERY_MODE_DEPRECATED = 0
+ SCSI_FENCING_DEPRECATED = 0
+ result = self._vdsm.spmStart(poolID, -1, '-1', SCSI_FENCING_DEPRECATED, RECOVERY_MODE_DEPRECATED)
+ self.verifyVDSMSuccess(result)
+
+ def activateStorageDomain(self, domainID, poolID):
+ result = self._vdsm.activateStorageDomain(domainID,poolID)
+ self.verifyVDSMSuccess(result)
+
+ def stringForXMLRPC(self, number):
+ return str(number)
+
+ def verifyVDSMSuccess(self, result):
+ if result[ 'status' ][ 'code' ] != 0:
+ raise Exception('expected OK result from VDSM, got "%s" instead' % str(result))
diff --git a/tests/functional/new/testlib/testcontexts/iscsi.py b/tests/functional/new/testlib/testcontexts/iscsi.py
index 2df2429..c0bac7f 100644
--- a/tests/functional/new/testlib/testcontexts/iscsi.py
+++ b/tests/functional/new/testlib/testcontexts/iscsi.py
@@ -1,26 +1,20 @@
import os
import random
import logging
-import uuid
-import storage.sd
-import vdsm.vdscli
-import vdsm.config
import tempfile
-import pwd
import shutil
-import os
-import time
import subprocess
import glob
+import storage.sd
import storage.volume
import storage.image
from . import base
class Verify(base.Verify):
def __init__(self, iqn, volumeGroup, vdsm, volumeID):
+ base.Verify.__init__(self, vdsm)
self._iqn = iqn
self._volumeGroup = volumeGroup
- self._vdsm = vdsm
self._volumeID = volumeID
def storageServerConnected(self):
@@ -53,36 +47,23 @@
pass
def volumeCreated(self, taskID):
- taskFinished = lambda: (self._taskStatus(taskID)['taskState'] == 'finished')
- self.waitFor(taskFinished, 'createVolume task to be finished', 10)
- taskStatus = self._taskStatus(taskID)
- assert taskStatus['code'] == 0, taskStatus['message']
+ self.waitOnVDSMTask(taskID, 10)
result = subprocess.call('sudo lvs %s | grep %s' % (self._volumeGroup['uuid'], self._volumeID), shell = True)
assert result == 0, "did not find logical volume in volume group"
- def _taskStatus(self, taskID):
- result = self._vdsm.getTaskStatus(taskID)
- assert result['status']['code'] == 0
- taskStatus = result['taskStatus']
- return taskStatus
-
-class ISCSI:
+class ISCSI(base.StorageBackend):
_NULL_UUID = '00000000-0000-0000-0000-000000000000'
def __init__(self):
- useSSL = vdsm.config.config.getboolean('vars', 'ssl')
- self._vdsm = vdsm.vdscli.connect(useSSL=useSSL)
+ base.StorageBackend.__init__(self)
self._iqn = 'iqn.1970-01.functional.test:%04d' % random.randint(1,10000)
- self._volumeGroup = { 'uuid': self._newUUID(), 'vgs_uuid': None }
- self._poolID = self._newUUID()
- self._connectionID = self._newUUID()
- self._imageID = self._newUUID()
- self._volumeID = self._newUUID()
+ self._volumeGroup = { 'uuid': self.newUUID(), 'vgs_uuid': None }
+ self._poolID = self.newUUID()
+ self._connectionID = self.newUUID()
+ self._imageID = self.newUUID()
+ self._volumeID = self.newUUID()
logging.info( 'using the following attributes:' )
for name in [ '_volumeGroup', '_iqn', '_poolID', '_connectionID', '_imageID', '_volumeID' ]:
logging.info( '%s => %s' % ( name, getattr(self,name) ) )
-
- def _randomName(self, base):
- return "%s_%04d" % (base, random.randint(1,10000))
def _targetcli(self, command):
commandAsList = command.split()
@@ -92,9 +73,9 @@
def __enter__(self):
self._testDirectory = tempfile.mkdtemp()
self._storageFile = os.path.join(self._testDirectory, 'testfile')
- self._fileioBackstore = self._randomName('backfile')
+ self._fileioBackstore = self.randomName('backfile')
logging.info('using %s, %s' % (self._fileioBackstore, self._storageFile))
- self._targetcli('/backstores/fileio create %s %s 30G' % (self._fileioBackstore, self._storageFile))
+ self._targetcli('/backstores/fileio create %s %s 10G' % (self._fileioBackstore, self._storageFile))
self._targetcli('/iscsi create %s' % self._iqn)
self._targetcli('/iscsi/%s/tpg1/luns create /backstores/fileio/%s' % (self._iqn, self._fileioBackstore))
self._targetcli('/iscsi/%s/tpg1 set attribute authentication=0 demo_mode_write_protect=0' % self._iqn)
@@ -150,27 +131,12 @@
def createStoragePool(self):
POOL_TYPE_DEPRECATED = 0
- result = self._vdsm.createStoragePool(0, self._poolID, self._randomName('pool'), self._domainID(), [ self._domainID() ], 1)
+ result = self._vdsm.createStoragePool(POOL_TYPE_DEPRECATED, self._poolID, self.randomName('pool'), self._domainID(), [ self._domainID() ], 1)
self.verifyVDSMSuccess(result)
return self._poolID
def _domainID(self):
return self._volumeGroup['uuid']
-
- def connectStoragePool(self):
- SCSI_KEY_DEPRECATED = 0
- result = self._vdsm.connectStoragePool(self._poolID, 1, SCSI_KEY_DEPRECATED, self._domainID(), 1)
- self.verifyVDSMSuccess(result)
-
- def spmStart(self):
- RECOVERY_MODE_DEPRECATED = 0
- SCSI_FENCING_DEPRECATED = 0
- result = self._vdsm.spmStart(self._poolID, -1, '-1', SCSI_FENCING_DEPRECATED, RECOVERY_MODE_DEPRECATED)
- self.verifyVDSMSuccess(result)
-
- def activateStorageDomain(self, domainID, poolID):
- result = self._vdsm.activateStorageDomain(domainID,poolID)
- self.verifyVDSMSuccess(result)
def _createVG(self):
lun = self._findLUN()
@@ -180,40 +146,22 @@
self._volumeGroup[ 'vgs_uuid' ] = result[ 'uuid' ]
self._lunGUID = lun['GUID']
- def _newUUID(self):
- return str(uuid.uuid4())
-
def createStorageDomain(self):
self._createVG()
self._createStorageDomain()
return self._volumeGroup['uuid']
- def embed(self,msg):
- print 'embedding: %s' % msg
- import IPython
- IPython.embed()
-
def _createStorageDomain(self):
domainID = self._volumeGroup['uuid']
DOMAIN_VERSION = 3
- domainName = self._randomName( 'some_name' )
+ domainName = self.randomName( 'some_name' )
result = self._vdsm.createStorageDomain(storage.sd.ISCSI_DOMAIN, domainID, domainName, self._volumeGroup['vgs_uuid'], storage.sd.DATA_DOMAIN, DOMAIN_VERSION)
self.verifyVDSMSuccess(result)
def createVolume(self, size):
PREALLOCATE = 1
result = self._vdsm.createVolume(self._domainID(), self._poolID, self._imageID,
- self._stringForXMLRPC(size), storage.volume.RAW_FORMAT, PREALLOCATE, storage.image.DATA_DISK_TYPE, self._volumeID, self._randomName('description'))
+ self.stringForXMLRPC(size), storage.volume.RAW_FORMAT, PREALLOCATE, storage.image.DATA_DISK_TYPE, self._volumeID, self.randomName('iscsi_description'))
logging.info( 'createVolume result: %s' % result)
self.verifyVDSMSuccess(result)
return result[ 'uuid' ]
-
- def _stringForXMLRPC(self, number):
- return str(number)
-
- def verifyVDSMSuccess(self, result):
- if result[ 'status' ][ 'code' ] != 0:
- raise Exception('expected OK result from VDSM, got "%s" instead' % str(result))
-
- def formatStorageDomain(self, domainID):
- self._vdsm.formatStorageDomain(domainID)
diff --git a/tests/functional/new/testlib/testcontexts/localfs.py b/tests/functional/new/testlib/testcontexts/localfs.py
index d1cadcd..ebfa239 100644
--- a/tests/functional/new/testlib/testcontexts/localfs.py
+++ b/tests/functional/new/testlib/testcontexts/localfs.py
@@ -1,41 +1,36 @@
-import os
-import uuid
-import storage.sd
-import vdsm.vdscli
-import vdsm.config
import tempfile
import pwd
import shutil
import os
-import time
+import storage.sd
+import storage.volume
+import storage.image
from . import base
class Verify(base.Verify):
- def __init__(self, directory):
+ def __init__(self, directory, vdsm):
+ base.Verify.__init__(self, vdsm)
self._directory = directory
- def _waitForVDSMToFinishTask(self):
- time.sleep(1)
-
def storageServerConnected(self):
- self._waitForVDSMToFinishTask()
+ self.waitForVDSMToFinishTask(2)
transformedDirectory = self._directory.replace( '/', '_' )
expectedSymlink = os.path.join('/rhev/data-center/mnt/', transformedDirectory)
self.assertPathExists(expectedSymlink, link = True)
def storageDomainGone(self, domainID):
- self._waitForVDSMToFinishTask()
+ self.waitForVDSMToFinishTask()
domainRoot = os.path.join(self._directory, domainID)
self.assertPathDoesNotExist(domainRoot)
def storageServerDisconnected(self):
- self._waitForVDSMToFinishTask()
+ self.waitForVDSMToFinishTask()
transformedDirectory = self._directory.replace( '/', '_' )
expectedSymlink = os.path.join('/rhev/data-center/mnt/', transformedDirectory)
self.assertPathDoesNotExist(expectedSymlink)
def storageDomainCreated(self, domainID):
- self._waitForVDSMToFinishTask()
+ self.waitForVDSMToFinishTask()
self._directIOTestFileExists()
domainRoot = os.path.join(self._directory, domainID)
expectedFiles = [ 'images', 'dom_md', 'dom_md/leases', 'dom_md/inbox', 'dom_md/outbox', 'dom_md/metadata', 'dom_md/ids' ]
@@ -44,12 +39,13 @@
assert os.path.exists(path)
def _directIOTestFileExists(self):
- self._waitForVDSMToFinishTask()
+ self.waitForVDSMToFinishTask()
directIOTestFile = os.path.join(self._directory, '__DIRECT_IO_TEST__')
self.assertPathExists(directIOTestFile)
- def volumeCreated(self, domainID, imageID, volumeID):
- self._waitForVDSMToFinishTask()
+ def volumeCreated(self, volumeInfo):
+ taskID, domainID, imageID, volumeID = volumeInfo
+ self.waitOnVDSMTask(taskID, 20)
domain_directory = os.path.join(self._directory, domainID)
image_directory = os.path.join(domain_directory, 'images', imageID)
self.assertPathExists(image_directory)
@@ -59,15 +55,19 @@
for path in volume_file, volume_lease, volume_meta:
self.assertPathExists(path)
-class LocalFS:
+class LocalFS(base.StorageBackend):
_NON_EXISTANT_POOL = '00000000-0000-0000-0000-000000000000'
def __init__(self):
- useSSL = vdsm.config.config.getboolean('vars', 'ssl')
- self._vdsm = vdsm.vdscli.connect(useSSL=useSSL)
+ base.StorageBackend.__init__(self)
+ self._domainID = self.newUUID()
+ self._poolID = self.newUUID()
+ self._imageID = self.newUUID()
+ self._volumeID = self.newUUID()
+ self._connectionID = self.newUUID()
def __enter__(self):
self._createDirectoryForLocalFSStorage()
- return self, Verify(self._directory)
+ return self, Verify(self._directory, self._vdsm)
def _createDirectoryForLocalFSStorage(self):
self._directory = tempfile.mkdtemp('', 'localfstest', '/var/tmp')
@@ -79,54 +79,37 @@
shutil.rmtree(self._directory)
def connectStorageServer(self):
- storageServerID = str(uuid.uuid4())
- localFilesystemConnection = {'connection': self._directory, 'iqn': '', 'user': '', 'tpgt': '1', 'password': '******', 'id': storageServerID, 'port': ''}
+ localFilesystemConnection = {'connection': self._directory, 'iqn': '', 'user': '', 'tpgt': '1', 'password': '******', 'id': self._connectionID, 'port': ''}
result = self._vdsm.connectStorageServer(
storage.sd.LOCALFS_DOMAIN,
self._NON_EXISTANT_POOL,
[ localFilesystemConnection ])
self.verifyVDSMSuccess(result)
- assert result[ 'statuslist' ][ 0 ][ 'id' ] == storageServerID
- return storageServerID
+ assert result[ 'statuslist' ][ 0 ][ 'id' ] == self._connectionID
- def disconnectStorageServer(self, storageServerID):
- localFilesystemConnection = {'connection': self._directory, 'iqn': '', 'user': '', 'tpgt': '1', 'password': '******', 'id': storageServerID, 'port': ''}
+ def disconnectStorageServer(self):
+ localFilesystemConnection = {'connection': self._directory, 'iqn': '', 'user': '', 'tpgt': '1', 'password': '******', 'id': self._connectionID, 'port': ''}
result = self._vdsm.disconnectStorageServer(
storage.sd.LOCALFS_DOMAIN,
self._NON_EXISTANT_POOL,
[ localFilesystemConnection ])
self.verifyVDSMSuccess(result)
- def createStoragePool(self, name, id, masterDomainID, domainIDs):
+ def createStoragePool(self):
POOL_TYPE_DEPRECATED = 0
- result = self._vdsm.createStoragePool(0, id, name, masterDomainID, domainIDs, 1)
+ result = self._vdsm.createStoragePool(POOL_TYPE_DEPRECATED, self._poolID, self.randomName('pool'), self._domainID, [ self._domainID ], 1)
self.verifyVDSMSuccess(result)
+ return self._poolID
- def connectStoragePool(self, id, masterDomainID):
- SCSI_KEY_DEPRECATED = 0
- result = self._vdsm.connectStoragePool(id, 1, SCSI_KEY_DEPRECATED, masterDomainID, 1)
- self.verifyVDSMSuccess(result)
-
- def spmStart(self, poolID):
- RECOVERY_MODE_DEPRECATED = 0
- SCSI_FENCING_DEPRECATED = 0
- result = self._vdsm.spmStart(poolID, -1, '-1', SCSI_FENCING_DEPRECATED, RECOVERY_MODE_DEPRECATED)
- self.verifyVDSMSuccess(result)
-
- def createLocalFilesystemStorageDomain(self):
- domainID = str(uuid.uuid4())
+ def createStorageDomain(self):
DOMAIN_VERSION = 3
- result = self._vdsm.createStorageDomain(storage.sd.LOCALFS_DOMAIN, domainID, 'some_name', self._directory, storage.sd.DATA_DOMAIN, DOMAIN_VERSION)
+ result = self._vdsm.createStorageDomain(storage.sd.LOCALFS_DOMAIN, self._domainID, 'some_name', self._directory, storage.sd.DATA_DOMAIN, DOMAIN_VERSION)
self.verifyVDSMSuccess(result)
- return domainID
+ return self._domainID
- def createVolume(self, domainID, poolID, imageID, size, format, preallocate, diskType, volumeID, description):
- result = self._vdsm.createVolume(domainID, poolID, imageID, size, format, preallocate, diskType, volumeID, description)
+ def createVolume(self, size):
+ PREALLOCATE = 1
+ result = self._vdsm.createVolume(self._domainID, self._poolID, self._imageID, self.stringForXMLRPC(size), storage.volume.RAW_FORMAT, PREALLOCATE, storage.image.DATA_DISK_TYPE, self._volumeID, self.randomName('localfs_description'))
self.verifyVDSMSuccess(result)
-
- def verifyVDSMSuccess(self, result):
- if result[ 'status' ][ 'code' ] != 0:
- raise Exception('expected OK result from VDSM, got "%s" instead' % str(result))
-
- def formatStorageDomain(self, domainID):
- self._vdsm.formatStorageDomain(domainID)
+ taskID = result[ 'uuid' ]
+ return taskID, self._domainID, self._imageID, self._volumeID
--
To view, visit http://gerrit.ovirt.org/32473
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I483d57fbab9e3a804b99bb8353f3885ca72ecf38
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Yoav Kleinberger <ykleinbe@redhat.com>
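
The shape of this refactoring: plumbing that was duplicated between
the iscsi and localfs contexts (the vdscli connection, UUID helpers,
result checking, and the shared connectStoragePool/spmStart verbs)
moves into base.StorageBackend, and the test body becomes
backend-agnostic, accepting any storage context class. A condensed
sketch of the resulting pattern (simplified: the real contexts also
return a Verify object from __enter__, and the vdscli calls are
omitted):

import shutil
import tempfile
import uuid


class StorageBackend(object):
    # Shared helpers; the real base class also opens a vdscli
    # connection in __init__.
    def newUUID(self):
        return str(uuid.uuid4())

    def verifyVDSMSuccess(self, result):
        if result['status']['code'] != 0:
            raise Exception(
                'expected OK result from VDSM, got "%s" instead'
                % str(result))


class LocalFS(StorageBackend):
    # Each backend owns only its own setup and teardown.
    def __enter__(self):
        self._directory = tempfile.mkdtemp('', 'localfstest', '/var/tmp')
        return self

    def __exit__(self, *exc_info):
        shutil.rmtree(self._directory)


def _testCreateVolume(storageContext):
    # One test body, parameterized over interchangeable backends.
    with storageContext() as backend:
        print(backend.newUUID())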
Change in vdsm[master]: storage domain detach with force fails
by Dan Kenigsberg
Dan Kenigsberg has posted comments on this change.
Change subject: storage domain detach with force fails
......................................................................
Patch Set 2:
Please detail how this was verified. I'd expect a serious verification of http://gerrit.ovirt.org/32212 to have caught this issue, too.
--
To view, visit http://gerrit.ovirt.org/32457
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: comment
Gerrit-Change-Id: I17058f86448c0019520ea9315da5524c5b3f2b6c
Gerrit-PatchSet: 2
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Piotr Kliczewski <piotr.kliczewski@gmail.com>
Gerrit-Reviewer: Allon Mureinik <amureini@redhat.com>
Gerrit-Reviewer: Dan Kenigsberg <danken@redhat.com>
Gerrit-Reviewer: Piotr Kliczewski <piotr.kliczewski@gmail.com>
Gerrit-Reviewer: Saggi Mizrahi <smizrahi@redhat.com>
Gerrit-Reviewer: Yaniv Bronhaim <ybronhei@redhat.com>
Gerrit-Reviewer: automation@ovirt.org
Gerrit-Reviewer: oVirt Jenkins CI Server
Gerrit-HasComments: No