Change in vdsm[master]: incorporate stop/start logic into functional tests
by ykleinbe@redhat.com
Yoav Kleinberger has uploaded a new change for review.
Change subject: incorporate stop/start logic into functional tests
......................................................................
incorporate stop/start logic into functional tests
Change-Id: I55b86578a681de2b09ac5ee01ca2e3b9443ed13a
Signed-off-by: Yoav Kleinberger <ykleinbe(a)redhat.com>
---
M tests/functional/new/basicLocalFSStorageDomainTest.py
A tests/functional/new/controlvdsm.py
2 files changed, 65 insertions(+), 1 deletion(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/23/29423/1
diff --git a/tests/functional/new/basicLocalFSStorageDomainTest.py b/tests/functional/new/basicLocalFSStorageDomainTest.py
index a590886..3280e16 100644
--- a/tests/functional/new/basicLocalFSStorageDomainTest.py
+++ b/tests/functional/new/basicLocalFSStorageDomainTest.py
@@ -6,10 +6,22 @@
import uuid
import storage.volume
import storage.image
+import logging
+logging.basicConfig(level=logging.DEBUG, format='%(asctime)s TEST %(levelname)s: %(message)s')
from . import testlib
+from . import controlvdsm
class TestBasicLocalFSStorageDomain:
- def notest_flow_connect_create_storage_domain_format_disconnect(self):
+ def setup(self):
+ control_vdsm = controlvdsm.ControlVDSM()
+ control_vdsm.cleanup()
+
+ @classmethod
+ def teardown_class(cls):
+ control_vdsm = controlvdsm.ControlVDSM()
+ control_vdsm.cleanup()
+
+ def test_flow_connect_create_storage_domain_format_disconnect(self):
with testlib.TemporaryDirectory() as directory:
with testlib.VDSMTestTools() as (vdsm, verify):
diff --git a/tests/functional/new/controlvdsm.py b/tests/functional/new/controlvdsm.py
new file mode 100644
index 0000000..a7e8e79
--- /dev/null
+++ b/tests/functional/new/controlvdsm.py
@@ -0,0 +1,52 @@
+import subprocess
+import logging
+import vdsm.vdscli
+import socket
+import vdsm.config
+import time
+
+class ControlVDSM:
+ def cleanup(self):
+ self._stop_service()
+ assert not self._service_running()
+ self._brutally_clean_files()
+ self._restart_service()
+ return self._check_connection()
+
+ def _check_connection(self):
+ useSSL = vdsm.config.config.getboolean('vars', 'ssl')
+ vdsmClient = vdsm.vdscli.connect(useSSL=useSSL)
+ RETRIES = 5
+ for _ in range(RETRIES):
+ try:
+ vdsmClient.getStorageDomainsList()
+ logging.info('VDSM ready for testing')
+ return
+ except socket.error as e:
+ logging.warning('could not talk to VDSM: %s' % e)
+ time.sleep(1)
+
+ raise Exception('could not connect to VDSM')
+
+ def _stop_service(self):
+ self._run("sudo service vdsmd stop")
+
+ def _service_running(self):
+ return_code = subprocess.call('sudo service vdsmd status', shell=True, stdout=open('/dev/null','w'), stderr=open('/dev/null','w'))
+ logging.info('vdsm running: %s' % (return_code == 0))
+ return return_code == 0
+
+ def _restart_service(self):
+ self._run("sudo vdsm-tool configure --force")
+ self._run("sudo service vdsmd start")
+
+ def _run(self, command):
+ logging.info('running: %s' % command)
+ return_code = subprocess.call(command, shell=True, close_fds=True, stdout=open('/dev/null','w'), stderr=open('/dev/null','w'))
+ if return_code != 0:
+ logging.warning('failure! command was: %s' % command)
+ else:
+ logging.info('finished.')
+
+ def _brutally_clean_files(self):
+ logging.warning('removing /rhev/data-center without asking too many questions')
+ self._run('sudo rm -fr /rhev/data-center/*')
--
To view, visit http://gerrit.ovirt.org/29423
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I55b86578a681de2b09ac5ee01ca2e3b9443ed13a
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Yoav Kleinberger <ykleinbe(a)redhat.com>
Change in vdsm[master]: vdsm: extract arch-dependent details
by ykleinbe@redhat.com
Yoav Kleinberger has uploaded a new change for review.
Change subject: vdsm: extract arch-dependent details
......................................................................
vdsm: extract arch-dependent details
Move the arch-dependent details out of the main package: the x86_64-only
dmidecode requirements are replaced by a new vdsm-arch-dependencies package,
the main vdsm package becomes noarch, and the in-tree safelease utility is
dropped in favour of an external safelease binary.
Change-Id: I94bfc8f9d93d8f7a73e69b9390329c342f2904e7
Signed-off-by: Yoav Kleinberger <ykleinbe(a)redhat.com>
---
M vdsm.spec.in
M vdsm/storage/protect/Makefile.am
D vdsm/storage/protect/safelease.c
M vdsm/storage/protect/spmprotect.sh
4 files changed, 43 insertions(+), 766 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/97/31297/1
diff --git a/vdsm.spec.in b/vdsm.spec.in
index ceb238f..2aa8bdb 100644
--- a/vdsm.spec.in
+++ b/vdsm.spec.in
@@ -136,10 +136,7 @@
Requires: mom >= 0.4.1
Requires: numactl
-%ifarch x86_64
-Requires: python-dmidecode
-Requires: dmidecode
-%endif
+Requires: vdsm-arch-dependencies
%if 0%{?rhel} >= 7 || 0%{?fedora} >= 18
Requires: libvirt-daemon >= 1.0.2-1
@@ -249,6 +246,8 @@
Requires(post): policycoreutils-python
Requires(preun): policycoreutils-python
+BuildArch: noarch
+
%description
The VDSM service is required by a Virtualization Manager to manage the
Linux hosts. VDSM manages and monitors the host's storage, memory and
@@ -257,7 +256,7 @@
%package cli
Summary: VDSM command line interface
-BuildArch: noarch
+
Requires: %{name}-python = %{version}-%{release}
Requires: %{name}-xmlrpc = %{version}-%{release}
@@ -267,7 +266,7 @@
%package xmlrpc
Summary: VDSM xmlrpc API
-BuildArch: noarch
+
Requires: %{name}-python = %{version}-%{release}
@@ -278,7 +277,7 @@
%package jsonrpc
Summary: VDSM API Server
-BuildArch: noarch
+
Requires: %{name}-python = %{version}-%{release}
Requires: %{name}-yajsonrpc = %{version}-%{release}
@@ -291,7 +290,7 @@
%package yajsonrpc
Summary: JSON RPC server and client implementation
-BuildArch: noarch
+
Requires: python >= 2.6
@@ -300,7 +299,7 @@
%package python-zombiereaper
Summary: Collects zombie processes automatically
-BuildArch: noarch
+
Requires: python >= 2.6
@@ -309,7 +308,7 @@
%package bootstrap
Summary: VDSM bootstrapping package
-BuildArch: noarch
+
%description bootstrap
VDSM bootstrapping package. Used for delivering the bootstrap code onto the
@@ -317,7 +316,7 @@
%package reg
Summary: VDSM registration package
-BuildArch: noarch
+
Requires: %{name} = %{version}-%{release}
Requires: m2crypto
@@ -334,7 +333,7 @@
Requires: python-cpopen >= 1.2.3-5
Requires: m2crypto
Requires: python-ioprocess >= 0.5-1
-BuildArch: noarch
+
%description python
Shared libraries between the various VDSM packages.
@@ -343,7 +342,7 @@
Summary: VDSM Debug Plugin
Requires: %{name}
Requires: %{name}-xmlrpc = %{version}-%{release}
-BuildArch: noarch
+
%description debug-plugin
Used by the trained monkeys at Red Hat to insert chaos and mayhem in to VDSM.
@@ -353,14 +352,14 @@
Requires: %{name} = %{version}-%{release}
Requires: dracut
Requires: python-nose
-BuildArch: noarch
+
%description tests
A test suite for verifying the functionality of a running vdsm instance
%package hook-checkimages
Summary: Qcow2 disk image format check hook for VDSM
-BuildArch: noarch
+
Requires: %{name}
%description hook-checkimages
@@ -369,7 +368,7 @@
%package hook-ethtool-options
Summary: Allow setting custom ethtool options for vdsm controlled nics
-BuildArch: noarch
+
Requires: %{name} = %{version}-%{release}
%description hook-ethtool-options
@@ -380,7 +379,7 @@
%package hook-vhostmd
Summary: VDSM hook set for interaction with vhostmd
Requires: vhostmd
-BuildArch: noarch
+
%description hook-vhostmd
VDSM hook to use vhostmd per VM according to Virtualization Manager requests.
@@ -388,7 +387,7 @@
%package hook-faqemu
Summary: Fake qemu process for VDSM quality assurance
-BuildArch: noarch
+
Requires: %{name}
%description hook-faqemu
@@ -399,7 +398,7 @@
%package hook-directlun
Summary: Direct LUN support for VDSM
-BuildArch: noarch
+
%description hook-directlun
VDSM hook enable user to add storage LUN for VDSM
@@ -407,7 +406,7 @@
%package hook-macbind
Summary: Bind a vNIC to a Bridge
-BuildArch: noarch
+
Requires: %{name} >= 4.14
%description hook-macbind
@@ -415,7 +414,7 @@
%package hook-macspoof
Summary: Disables MAC spoofing filtering
-BuildArch: noarch
+
%description hook-macspoof
VDSM hooks which allow to disable mac spoof filtering
@@ -424,7 +423,7 @@
%package hook-extnet
Summary: Force a vNIC to connect to a specific libvirt network
-BuildArch: noarch
+
Requires: %{name} = %{version}-%{release}
%description hook-extnet
@@ -433,7 +432,7 @@
%package hook-fakevmstats
Summary: Generate random VM statistics
-BuildArch: noarch
+
Requires: %{name}
%description hook-fakevmstats
@@ -441,7 +440,7 @@
%package hook-fileinject
Summary: Allow uploading file to VMs disk
-BuildArch: noarch
+
Requires: python-libguestfs
%description hook-fileinject
@@ -450,14 +449,14 @@
%package hook-floppy
Summary: Allow adding floppy to VM
-BuildArch: noarch
+
%description hook-floppy
Allow adding floppy to VM
%package hook-hostusb
Summary: Allow attaching USB device from host
-BuildArch: noarch
+
Requires: usbutils
%description hook-hostusb
@@ -466,7 +465,7 @@
%package hook-hugepages
Summary: Huge pages enable user to handle VM with 2048KB page files.
-BuildArch: noarch
+
%description hook-hugepages
Hook is getting number of huge pages reserve them for the VM,
@@ -474,7 +473,7 @@
%package hook-isolatedprivatevlan
Summary: Isolated network environment for VMs
-BuildArch: noarch
+
%description hook-isolatedprivatevlan
limit VM traffic to a specific gateway by its mac address,
@@ -484,7 +483,7 @@
%package hook-nestedvt
Summary: Nested Virtualization support for VDSM
-BuildArch: noarch
+
%description hook-nestedvt
If the nested virtualization is enabled in your kvm module
@@ -492,7 +491,7 @@
%package hook-numa
Summary: NUMA support for VDSM
-BuildArch: noarch
+
%description hook-numa
Hooks is getting number/rage of NUMA nodes and NUMA mode,
@@ -500,14 +499,14 @@
%package hook-openstacknet
Summary: OpenStack Network vNICs support for VDSM
-BuildArch: noarch
+
%description hook-openstacknet
Hook for OpenStack Network vNICs.
%package hook-pincpu
Summary: Hook pin VM so specific CPUs
-BuildArch: noarch
+
%description hook-pincpu
pincpu is hook for VDSM.
@@ -515,7 +514,7 @@
%package hook-promisc
Summary: Network interface promiscuous mode support for VDSM
-BuildArch: noarch
+
%description hook-promisc
VDSM promiscuous mode let user define a VM interface that will capture
@@ -523,7 +522,7 @@
%package hook-qemucmdline
Summary: QEMU cmdline hook for VDSM
-BuildArch: noarch
+
Requires: %{name}
%description hook-qemucmdline
@@ -533,14 +532,14 @@
%package hook-qos
Summary: QoS network in/out traffic support for VDSM
-BuildArch: noarch
+
%description hook-qos
Hook adds QoS in/out traffic to VMs interfaces
%package hook-scratchpad
Summary: One time disk creation for VDSM
-BuildArch: noarch
+
%description hook-scratchpad
scratchpad hook for VDSM
@@ -550,7 +549,7 @@
%package hook-smbios
Summary: Adding custom smbios entries to libvirt domain via VDSM
-BuildArch: noarch
+
%description hook-smbios
Adding custom smbios entries to libvirt domain via VDSM
@@ -561,7 +560,7 @@
%if 0%{?rhel} >= 7 || 0%{?fedora} >= 18
Requires: libvirt-daemon-driver-nodedev
%endif
-BuildArch: noarch
+
%description hook-sriov
sr-iov hook enable to add virtual functions exposed by the device
@@ -570,7 +569,7 @@
%package hook-spiceoptions
Summary: To configure spice options for vm
-BuildArch: noarch
+
%description hook-spiceoptions
This vdsm hook can be used to configure some of
@@ -578,7 +577,7 @@
%package hook-vmfex
Summary: vmfex support for VDSM
-BuildArch: noarch
+
Conflicts: hook-vmfex-dev
%description hook-vmfex
@@ -586,7 +585,7 @@
%package hook-vmfex-dev
Summary: VM-FEX vNIC support for VDSM
-BuildArch: noarch
+
Requires: %{name} = %{version}-%{release}
Conflicts: hook-vmfex
@@ -596,7 +595,7 @@
%package hook-vmdisk
Summary: External disk support for VDSM
-BuildArch: noarch
+
%description hook-vmdisk
Hook adds additional disk image for a VM (raw or qcow2)
@@ -604,7 +603,7 @@
%if 0%{?with_gluster}
%package gluster
Summary: Gluster Plugin for VDSM
-BuildArch: noarch
+
Requires: %{name} = %{version}-%{release}
Requires: glusterfs-server
@@ -1051,7 +1050,6 @@
%{_datadir}/%{vdsm_name}/storage/volume.py*
%{_datadir}/%{vdsm_name}/storage/imageRepository/__init__.py*
%{_datadir}/%{vdsm_name}/storage/imageRepository/formatConverter.py*
-%{_libexecdir}/%{vdsm_name}/safelease
%{_libexecdir}/%{vdsm_name}/spmprotect.sh
%{_libexecdir}/%{vdsm_name}/spmstop.sh
%dir %{_libexecdir}/%{vdsm_name}/hooks
diff --git a/vdsm/storage/protect/Makefile.am b/vdsm/storage/protect/Makefile.am
index feb40a9..1eb4661 100644
--- a/vdsm/storage/protect/Makefile.am
+++ b/vdsm/storage/protect/Makefile.am
@@ -6,11 +6,6 @@
# LICENSE_GPL_v2 which accompany this distribution.
#
-vdsmexec_PROGRAMS = safelease
-
dist_vdsmexec_SCRIPTS = \
spmprotect.sh \
spmstop.sh
-
-safelease_SOURCES = \
- safelease.c
diff --git a/vdsm/storage/protect/safelease.c b/vdsm/storage/protect/safelease.c
deleted file mode 100644
index 3814231..0000000
--- a/vdsm/storage/protect/safelease.c
+++ /dev/null
@@ -1,716 +0,0 @@
-/* Locker */
-#define _GNU_SOURCE 1
-#include <stdarg.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <string.h>
-#include <sys/types.h>
-#include <sys/time.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <errno.h>
-#include <time.h>
-#include <signal.h>
-
-#define WARN(fmt, args...) warn(__FUNCTION__, fmt, ## args)
-#define PANIC(fmt, args...) panic(__FUNCTION__, fmt, ## args)
-#define DEBUG(fmt, args...) do { if (debug) warn(__FUNCTION__, fmt, ## args); } while (0)
-
-char *freetag = "------FREE------0000000000000000";
-enum {
- idlen = 16,
- stamplen = 16,
- taglen = idlen + stamplen,
-};
-
-char *progname;
-int debug;
-
-char *id;
-char *path;
-char *request;
-
-long lease_ms;
-long op_max_ms;
-char *iobuf;
-
-inline unsigned long long tv2msec(struct timeval *tv);
-int renew(int fd, off_t offset, char *id, long long *ts);
-
-void
-panic(const char const *fn, char *msg, ...)
-{
- char buf[512];
- va_list va;
- int n;
-
- va_start(va, msg);
- n = vsprintf(buf, msg, va);
- va_end(va);
- buf[n] = 0;
-
- fprintf(stderr, "panic: [%d] %s: %s: (%m)\n", getpid(), fn, buf);
-
- exit(-1);
-}
-
-void
-warn(const char const *fn, char *msg, ...)
-{
- struct timeval tv;
- long long unsigned tscurr;
- char buf[512];
- va_list va;
- int n;
-
- va_start(va, msg);
- n = vsprintf(buf, msg, va);
- va_end(va);
- buf[n] = 0;
-
- gettimeofday(&tv, 0);
- tscurr = tv2msec(&tv);
-
- fprintf(stderr, "[%s:%d:%llu]: %s: %s\n", progname, getpid(), tscurr, fn, buf);
-}
-
-void
-usage(void)
-{
- fprintf(stderr, "Usage: %s [ -h ] <op> [...]\n", progname);
- fprintf(stderr, "Ops:\n"
- "acquire [ -r <path> ] [ -b ] [ -o offset ] <path> <id> <lease_ms> <op_max_ms>\n"
- "renew [ -r <path> ] [ -o offset ] [ -t laststamp ] <path> <id> <lease_ms> <op_max_ms>\n"
- "release [ -f ] [ -o offset ] <path> <id>\n"
- "query [ -o offset ] <path>\n"
- "protect [ -r <path> -i <id>] [ -o offset ] <path> <lease_ms> <op_max_ms> <progname> [<param1> ...]\n"
- );
- fprintf(stderr, "\nNotes:\n"
- "-b - busy loop on lease until lease acquired\n"
- "-f - force release even if lease id is not equal to id\n"
- "-o - offset to lease in path (default is 0)\n"
- "-t - timestamp of last successful renewal\n"
- "Path is a path to a device or a file to use as a sync object.\n"
- "Id is an arbitrary unique string\n"
- "lease_ms is the maximum time in msec that the owner of the lease\n"
- " may hold it without renewing it\n"
- "op_max_ms is the maximum time in msec that a single IO operation may take (must be <= lease_ms).\n"
- "if -r option is used, the path is a readable file/device.\n"
- " The program then validates that its 'id' is written at the given offset.\n"
- " If this is not the case, acquire and renew will fail immediately.\n"
- );
- exit(1);
-}
-
-inline unsigned long long
-tv2msec(struct timeval *tv)
-{
- return tv->tv_sec * 1000ull + tv->tv_usec/1000;
-}
-
-int
-withintimelimits(struct timeval *start, struct timeval *stop)
-{
- unsigned long long delta;
- if (op_max_ms <= 0)
- return 1;
- delta = tv2msec(stop) - tv2msec(start);
- if (delta > op_max_ms) {
- DEBUG("Error - time limit breached: op_max_ms - %ld, time passed - %lld", op_max_ms, delta);
- errno = -ETIMEDOUT;
- return 0;
- }
- return 1;
-}
-
-int
-sametag(const char *tag1, const char *tag2)
-{
- return !memcmp(tag1, tag2, taglen);
-}
-
-int
-isfree(const char *tag)
-{
- return sametag(tag, freetag);
-}
-
-void
-settag(char *tag, const char *src)
-{
- memcpy(tag, src, taglen);
-}
-
-void
-buildtag(char *tag, const char *id, long long ts)
-{
- snprintf(tag, taglen+1, "%-*s%0*llx", idlen, id, stamplen, ts);
- DEBUG("'%s' ts %lld", tag, ts);
-}
-
-int
-sameid(const char *tag, const char *id)
-{
- char _id[idlen+1];
-
- snprintf(_id, idlen+1, "%-*s", idlen, id);
- return !memcmp(tag, _id, idlen);
-}
-
-void
-querytag(const char *tag, char *id, long long *ts)
-{
- char _stamp[stamplen+1] = "";
-
- memcpy(id, tag, idlen);
- id[idlen] = 0;
- memcpy(_stamp, tag+idlen, stamplen);
- *ts = strtoull(_stamp, 0, 16);
-}
-
-int
-readtag(int fd, off_t offset, char *tag, int limit)
-{
- struct timeval start, stop;
- int r;
-
- DEBUG("fd %d offset %ld", fd, offset);
- gettimeofday(&start, 0);
- r = pread(fd, iobuf, 512, offset);
- gettimeofday(&stop, 0);
- DEBUG("r %d %m", r);
- if (r <= 0 || (limit && !withintimelimits(&start, &stop)))
- return -1;
- memcpy(tag, iobuf, taglen);
- return r;
-}
-
-int
-writetag(int fd, off_t offset, const char *tag, int limit)
-{
- struct timeval start, stop;
- int r;
-
- DEBUG("Enter");
- memcpy(iobuf, tag, taglen);
- gettimeofday(&start, 0);
- r = pwrite(fd, iobuf, 512, offset) < taglen ? -1 : 0;
- gettimeofday(&stop, 0);
- DEBUG("Exit r=%ld", r);
- if (r < 0 || (limit && !withintimelimits(&start, &stop)))
- return -1;
- return r;
-}
-
-int
-writetimestamp(int fd, off_t offset, const char *id, char *tag, long long *ts)
-{
- struct timeval tv;
- long long t;
- int r;
-
- gettimeofday(&tv, 0);
- t = tv.tv_sec * 1000000ll + tv.tv_usec;
- buildtag(tag, id, t);
- r = writetag(fd, offset, tag, 1);
- if (r > 0)
- *ts = t;
- return r;
-}
-
-/*
- * Attempt to acquire the lease.
- * Return 1 if succedded, 0 if not , and < 0 on errors.
- */
-int
-acquire(int fd, off_t offset, char *id, int busyloop, long long *ts)
-{
- char curr[taglen+1] = "", last[taglen+1] = "", tag[taglen+1] = "";
- long backoff_usec = (lease_ms + 6 * op_max_ms) * 1000;
- long contend_usec = (2 * op_max_ms) * 1000;
- char dummyid[idlen+1];
-
- if (readtag(fd, offset, curr, 1) < 0)
- return -errno;
-
- settag(last, freetag);
-
- do {
- DEBUG("restart: curr tag is '%s'", curr);
- if (!sametag(curr, last) && !isfree(curr)) do {
- DEBUG("backoff: curr tag is '%s'", curr);
- settag(last, curr);
- usleep(backoff_usec);
- if (readtag(fd, offset, curr, 1) < 0)
- return -errno;
- } while (busyloop && !sametag(curr, last) && !isfree(curr));
- if (!sametag(curr, last) && !isfree(curr)) {
- DEBUG("fail: curr tag is '%s'", curr);
- return 0;
- }
- DEBUG("contend: curr tag is '%s'", curr);
- if (writetimestamp(fd, offset, id, tag, ts) < 0) {
- DEBUG("lost (writetimestamp failed) : curr tag is %s", curr);
- return -errno;
- }
- usleep(contend_usec);
- if (readtag(fd, offset, curr, 1) < 0) {
- DEBUG("lost (readtag failed) : curr tag is %s", curr);
- return -errno;
- }
- } while (busyloop && !sametag(curr, tag));
-
- if (busyloop || sametag(curr, tag)) {
- DEBUG("won : curr tag is %s", curr);
- querytag(curr, dummyid, ts);
- return renew(fd, offset, id, ts);
- }
- DEBUG("lost : curr tag is %s\n our tag is %s", curr, tag);
- return 0;
-}
-
-static void
-handler(int sig)
-{
- PANIC("IO op too long");
-}
-
-long long
-timeleft_ms(long long tsprev)
-{
- struct timeval tv;
- long long tscurr;
-
- tsprev /= 1000;
- gettimeofday(&tv, 0);
- tscurr = tv2msec(&tv);
- DEBUG("time elapsed: %lld/%lld", tscurr - tsprev, lease_ms);
- return lease_ms - (tscurr - tsprev);
-}
-
-/*
- * Attempt to renew the lease.
- * Return 1 if succeded, 0 if not , and < 0 on errors.
- */
-int
-renew(int fd, off_t offset, char *id, long long *ts)
-{
- char curr[taglen+1] = "", tag[taglen+1] = "";
- char dummyid[idlen+1];
- struct sigaction sa;
- long long msleft;
- int rc = 0;
-
- sa.sa_flags = !SA_RESTART;
- sigemptyset(&sa.sa_mask);
- sa.sa_handler = handler;
- if (sigaction(SIGALRM, &sa, NULL) == -1)
- PANIC("sigaction: can't set alarm");
-
- if (readtag(fd, offset, curr, 0) < 0) {
- rc = -errno;
- goto out;
- }
-
- DEBUG("curr tag is '%s'", curr);
- if (!sameid(curr, id)) {
- *ts = 0;
- goto out;
- }
-
- querytag(curr, dummyid, ts);
- msleft = timeleft_ms(*ts);
- if (msleft <= 0) {
- rc = -ETIMEDOUT;
- goto out;
- }
-
- alarm(msleft / 1000);
- DEBUG("updating tag: msleft %lld", msleft);
- if (writetimestamp(fd, offset, id, tag, ts) < 0) {
- rc = -errno;
- goto out;
- }
-
- DEBUG("All good");
- /* disable the alarm because usleep might use the same signal */
- alarm(0);
- return 1;
-
-out:
- alarm(0);
- return rc;
-}
-
-/*
- * Attempt to release the lease.
- * Return 1 if succedded, 0 if not , and < 0 on errors.
- */
-int
-release(int fd, off_t offset, char *id, int force)
-{
- char curr[taglen+1] = "";
-
- if (!force) {
- if (readtag(fd, offset, curr, 0) < 0)
- return -errno;
-
- if (!sameid(curr, id))
- return 0;
- }
-
- return writetag(fd, offset, freetag, 0) < 0 ? -1 : 1;
-}
-
-/*
- * Qeury the lease.
- * Return 1 if succedded, 0 if not , and < 0 on errors.
- */
-int
-query(int fd, off_t offset)
-{
- char curr[taglen+1] = "";
- char id[idlen+1] = "";
- long long ts;
- time_t tsec;
- int tusec;
- char *t;
-
- if (readtag(fd, offset, curr, 0) < 0)
- return -errno;
-
- querytag(curr, id, &ts);
- tsec = ts / 1000000;
- tusec = ts % 1000000;
-
- t = ctime(&tsec);
- t[strlen(t)-1] = 0;
-
- printf("%s: ID %-*s TS %0*llx (%s, %d usec)\n",
- sameid(curr, freetag) ? "FREE" : "LOCKED",
- idlen, id, stamplen, ts, t, tusec);
-
- return 1;
-}
-
-void
-validate_path(const char *path)
-{
- if (access(path, R_OK | W_OK) < 0)
- PANIC("can't access '%s'", path);
-}
-
-void
-validate_id(const char *id)
-{
- if (strlen(id) > idlen)
- PANIC("id must be <= 8 characters");
- if (!strncmp(id, freetag, idlen))
- PANIC("can't lease free stamp");
-}
-
-void
-validate_lease_params(int lease_ms, int op_max_ms)
-{
- if (lease_ms <= 0 || op_max_ms <= 0 || lease_ms < op_max_ms ||
- op_max_ms < 1000 || op_max_ms % 1000 != 0)
- PANIC("bad lease/op max timeouts");
-}
-
-/*
- * Initialize the timeout to one op_max_ms.
- */
-long long
-renew_timeout(void)
-{
- struct timeval tv;
-
- gettimeofday(&tv, 0);
- return tv.tv_sec * 1000000ull + tv.tv_usec - (lease_ms - op_max_ms) * 1000;
-}
-
-int
-cmd_acquire(int argc, char **argv)
-{
- int opt, fd, r, b = 0;
- off_t offset = 0;
- long long ts;
-
- optind = 0;
- while ((opt = getopt(argc, argv, "+hdr:bo:")) != -1) {
- switch (opt) {
- case 'h':
- usage();
- break;
- case 'd':
- debug++;
- break;
- case 'r':
- request = optarg;
- break;
- case 'b':
- b = 1;
- break;
- case 'o':
- offset = strtoul(optarg, 0, 0);
- break;
- }
- }
- if (argc - optind < 4)
- usage();
-
- path = argv[optind++];
- validate_path(path);
- id = argv[optind++];
- validate_id(id);
- lease_ms = strtoul(argv[optind++], 0, 0);
- op_max_ms = strtoul(argv[optind++], 0, 0);
- validate_lease_params(lease_ms, op_max_ms);
-
- DEBUG("path '%s' offset %ld id '%s' lease_ms %ld op_max_ms %ld",
- path, offset, id, lease_ms, op_max_ms);
-
- if ((fd = open(path, O_RDWR | O_DIRECT)) < 0)
- panic("can't open '%s'", path);
-
- r = acquire(fd, offset, id, b, &ts);
-
- close(fd);
-
- if (r == 1) {
- /* print last successful timestamp == aquire time */
- printf("%lld", ts);
- DEBUG("Succeeded");
- return 0;
- } else
- DEBUG("%s (%s)", "Failed", strerror(r));
-
- return 1;
-}
-
-int
-cmd_renew(int argc, char **argv)
-{
- long long ts = renew_timeout();
- off_t offset = 0;
- int opt, fd, r;
-
- optind = 0;
- while ((opt = getopt(argc, argv, "+hdr:o:t:")) != -1) {
- switch (opt) {
- case 'h':
- usage();
- break;
- case 'd':
- debug++;
- break;
- case 'r':
- request = optarg;
- break;
- case 'o':
- offset = strtoul(optarg, 0, 0);
- break;
- case 't':
- ts = strtoll(optarg, 0, 0);
- break;
- }
- }
- if (argc - optind < 4)
- usage();
-
- path = argv[optind++];
- validate_path(path);
- id = argv[optind++];
- validate_id(id);
- lease_ms = strtoul(argv[optind++], 0, 0);
- op_max_ms = strtoul(argv[optind++], 0, 0);
- validate_lease_params(lease_ms, op_max_ms);
-
- DEBUG("path '%s' offset %ld id '%s' lease_ms %ld op_max_ms %ld",
- path, offset, id, lease_ms, op_max_ms);
-
- if ((fd = open(path, O_RDWR | O_DIRECT)) < 0)
- panic("can't open '%s'", path);
-
- r = renew(fd, offset, id, &ts);
-
- close(fd);
-
- /* print out the last successful renewal timestamp, or zero for don't renew */
- printf("%lld\n", ts);
-
- if (r == 1) {
- DEBUG("Succeeded");
- return 0;
- }
-
- DEBUG("%s (%s)", "Failed", strerror(r));
- return 1;
-}
-
-int
-cmd_release(int argc, char **argv)
-{
- int opt, fd, r;
- int force = 0;
- off_t offset = 0;
-
- optind = 0;
- while ((opt = getopt(argc, argv, "+hdfo:")) != -1) {
- switch (opt) {
- case 'h':
- usage();
- break;
- case 'd':
- debug++;
- break;
- case 'f':
- force++;
- break;
- case 'o':
- offset = strtoul(optarg, 0, 0);
- break;
- }
- }
- if (argc - optind < 2)
- usage();
-
- path = argv[optind++];
- validate_path(path);
- id = argv[optind++];
- validate_id(id);
-
- DEBUG("path '%s' offset %ld id '%s' force %d", path, offset, id, force);
-
- if ((fd = open(path, O_RDWR | O_DIRECT)) < 0)
- panic("can't open '%s'", path);
-
- r = release(fd, offset, id, force);
-
- close(fd);
-
- if (r == 1) {
- DEBUG("Succeeded");
- return 0;
- } else
- DEBUG("%s (%s)", "Failed", strerror(r));
-
- return 1;
-}
-
-int
-cmd_query(int argc, char **argv)
-{
- int opt, fd, r;
- off_t offset = 0;
-
- optind = 0;
- while ((opt = getopt(argc, argv, "+hdr:o:")) != -1) {
- switch (opt) {
- case 'h':
- usage();
- break;
- case 'd':
- debug++;
- break;
- case 'r':
- request = optarg;
- break;
- case 'o':
- offset = strtoul(optarg, 0, 0);
- break;
- }
- }
- if (argc - optind < 4)
- usage();
-
- path = argv[optind++];
- validate_path(path);
-
- DEBUG("path '%s' offset %ld id '%s'", path, offset, id);
-
- if ((fd = open(path, O_RDWR | O_DIRECT)) < 0)
- panic("can't open '%s'", path);
-
- r = query(fd, offset);
-
- close(fd);
-
- if (r == 1) {
- DEBUG("Succeeded");
- return 0;
- } else
- DEBUG("%s (%s)", "Failed", strerror(r));
-
- return 1;
-}
-
-int
-cmd_protect(int argc, char **argv)
-{
- return 0;
-}
-
-void
-sig_handler(int sig)
-{
- fprintf(stderr, "%s: Exiting due to signal %d\n", progname, sig);
- exit(0);
-}
-
-int
-main(int argc, char **argv)
-{
- int opt;
- void *v = 0;
-
- signal(SIGTERM, sig_handler);
- signal(SIGINT, sig_handler);
- signal(SIGTRAP, sig_handler);
-
- if (posix_memalign(&v, 4096, 512) != 0)
- {
- fprintf(stderr, "fatal memory allocation error\n");
- return 1;
- }
-
- iobuf = v;
- memset(iobuf, 0, 512);
-
- progname = strrchr(argv[0], '/');
- if (!progname)
- progname = argv[0];
- else
- progname++;
-
- while ((opt = getopt(argc, argv, "+hd")) != -1) {
- switch (opt) {
- case 'h':
- usage();
- break;
- case 'd':
- debug++;
- break;
- }
- }
- if (optind >= argc)
- usage();
-
- if (!strcmp(argv[optind], "acquire"))
- return cmd_acquire(argc - optind, argv + optind);
- if (!strcmp(argv[optind], "renew"))
- return cmd_renew(argc - optind, argv + optind);
- if (!strcmp(argv[optind], "release"))
- return cmd_release(argc - optind, argv + optind);
- if (!strcmp(argv[optind], "query"))
- return cmd_query(argc - optind, argv + optind);
- if (!strcmp(argv[optind], "protect"))
- return cmd_protect(argc - optind, argv + optind);
-
- fprintf(stderr, "unknonwn op <%s>\n", argv[optind]);
- usage();
-
- return 1;
-}
-
diff --git a/vdsm/storage/protect/spmprotect.sh b/vdsm/storage/protect/spmprotect.sh
index f569a91..e864b34 100755
--- a/vdsm/storage/protect/spmprotect.sh
+++ b/vdsm/storage/protect/spmprotect.sh
@@ -24,7 +24,7 @@
SETSID="/usr/bin/setsid"
LOGFILE="/var/log/vdsm/spm-lock.log"
VDS_CLIENT="/usr/bin/vdsClient"
-LEASE_UTIL="./safelease"
+LEASE_UTIL="../safelease/safelease"
KILL="/bin/kill"
PKILL="/usr/bin/pkill"
sdUUID=$2
--
To view, visit http://gerrit.ovirt.org/31297
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I94bfc8f9d93d8f7a73e69b9390329c342f2904e7
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Yoav Kleinberger <ykleinbe(a)redhat.com>
Change in vdsm[master]: tests: added NFS support to functional tests
by ykleinbe@redhat.com
Yoav Kleinberger has uploaded a new change for review.
Change subject: tests: added NFS support to functional tests
......................................................................
tests: added NFS support to functional tests
In this patch, I introduce a new NFS storage context for the storage
functional tests. Since there is much shared logic between NFS and
LocalFS storage contexts, I introduce a common superclass.
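The shared logic boils down to verifying the same directory layout under a
backend-specific mount point; roughly like the following simplified sketch
(class names here are illustrative, not the exact ones in the patch):

    import os


    class FileBasedVerify(object):
        # Shared checks: every file-based domain has the same layout under
        # its mount point; only the mount point itself is backend-specific.
        def rhevMountPoint(self):
            raise NotImplementedError('subclasses must override this')

        def storageDomainCreated(self, domainID):
            domainRoot = os.path.join(self.rhevMountPoint(), domainID)
            for relative in ('images', 'dom_md', 'dom_md/metadata'):
                assert os.path.exists(os.path.join(domainRoot, relative))


    class LocalFSVerify(FileBasedVerify):
        def __init__(self, directory):
            self._directory = directory

        def rhevMountPoint(self):
            # VDSM mounts a local directory under /rhev/data-center/mnt with
            # the slashes replaced by underscores
            return os.path.join('/rhev/data-center/mnt/',
                                self._directory.replace('/', '_'))


    class NFSVerify(FileBasedVerify):
        def __init__(self, directory):
            self._directory = directory

        def rhevMountPoint(self):
            # an NFS mount point additionally carries the server address
            return os.path.join(
                '/rhev/data-center/mnt/',
                '127.0.0.1:%s' % self._directory.replace('/', '_'))

With that split in place, the NFS context only has to manage the export
(/etc/exports, nfs-server restart) and supply its own connection details.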
Change-Id: I1781fc400c0604855d3143dde22ccb29e6cc8013
Signed-off-by: Yoav Kleinberger <ykleinbe(a)redhat.com>
---
M tests/functional/basicStorageTest.py
M tests/functional/testlib/storagecontexts/base.py
A tests/functional/testlib/storagecontexts/filebased.py
M tests/functional/testlib/storagecontexts/iscsi.py
M tests/functional/testlib/storagecontexts/localfs.py
A tests/functional/testlib/storagecontexts/nfs.py
6 files changed, 249 insertions(+), 114 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/90/32990/1
diff --git a/tests/functional/basicStorageTest.py b/tests/functional/basicStorageTest.py
index c01d14b..438f8c8 100644
--- a/tests/functional/basicStorageTest.py
+++ b/tests/functional/basicStorageTest.py
@@ -4,6 +4,7 @@
from functional.testlib import controlvdsm
from functional.testlib.storagecontexts import localfs
from functional.testlib.storagecontexts import iscsi
+from functional.testlib.storagecontexts import nfs
class TestBasicStorageDomain(object):
@@ -23,6 +24,9 @@
def testCreateVolumeISCSI(self):
self._testCreateVolume(iscsi.ISCSI)
+ def testCreateVolumeNFS(self):
+ self._testCreateVolume(nfs.NFS)
+
def _testCreateVolume(self, storageBackend):
with storageBackend() as (vdsm, verify):
storageServerID = vdsm.connectStorageServer()
diff --git a/tests/functional/testlib/storagecontexts/base.py b/tests/functional/testlib/storagecontexts/base.py
index 7179694..2e308ad 100644
--- a/tests/functional/testlib/storagecontexts/base.py
+++ b/tests/functional/testlib/storagecontexts/base.py
@@ -73,16 +73,47 @@
class StorageBackend(object):
def __init__(self):
self._vdsmCaller = vdsmcaller.VDSMCaller()
+ self._domainID = self._newUUID()
+ self._poolID = self._newUUID()
+ self._imageID = self._newUUID()
+ self._volumeID = self._newUUID()
+ self._connectionID = self._newUUID()
+
+ def connectionID(self):
+ return self._connectionID
+
+ def volumeID(self):
+ return self._volumeID
+
+ def imageID(self):
+ return self._imageID
+
+ def poolID(self):
+ return self._poolID
+
+ def domainID(self):
+ return self._domainID
def vdsm(self):
return self._vdsmCaller
- def newUUID(self):
+ def _newUUID(self):
return str(uuid.uuid4())
def randomName(self, base):
return "%s_%04d" % (base, random.randint(1, 10000))
+ def createStoragePool(self):
+ POOL_TYPE_DEPRECATED = 0
+ self.vdsm().createStoragePool(
+ POOL_TYPE_DEPRECATED,
+ self.poolID(),
+ self.randomName('pool'),
+ self.domainID(),
+ [self.domainID()],
+ 1)
+ return self.poolID()
+
def connectStoragePool(self, poolID, masterDomainID):
SCSI_KEY_DEPRECATED = 0
self.vdsm().connectStoragePool(
diff --git a/tests/functional/testlib/storagecontexts/filebased.py b/tests/functional/testlib/storagecontexts/filebased.py
new file mode 100644
index 0000000..bdb15d7
--- /dev/null
+++ b/tests/functional/testlib/storagecontexts/filebased.py
@@ -0,0 +1,69 @@
+import os
+import storage.volume
+import storage.image
+from . import base
+import logging
+
+
+class Verify(base.Verify):
+
+ def rhevMountPoint(self):
+ raise Exception('you must override this function')
+
+ def storageServerConnected(self):
+ self.sleepWhileVDSMCompletesTask(duration=2)
+ assert os.path.exists(self.rhevMountPoint())
+
+ def storageDomainCreated(self, domainID):
+ self.sleepWhileVDSMCompletesTask(duration=1)
+ self._directIOTestFileExists()
+ domainRoot = os.path.join(self.rhevMountPoint(), domainID)
+ expectedFiles = [
+ 'images',
+ 'dom_md',
+ 'dom_md/leases',
+ 'dom_md/inbox',
+ 'dom_md/outbox',
+ 'dom_md/metadata',
+ 'dom_md/ids']
+ expectedFullPaths =\
+ [os.path.join(domainRoot, path) for path in expectedFiles]
+ for path in expectedFullPaths:
+ logging.info('verifying path: %s' % path)
+ assert os.path.exists(path)
+
+ def _directIOTestFileExists(self):
+ directIOTestFile = os.path.join(
+ self.rhevMountPoint(),
+ '__DIRECT_IO_TEST__')
+ assert os.path.exists(directIOTestFile)
+
+ def volumeCreated(self, volumeInfo):
+ taskID, domainID, imageID, volumeID = volumeInfo
+ self.waitUntilVDSMTaskFinished(taskID, 20)
+ domainRoot = os.path.join(self.rhevMountPoint(), domainID)
+ imageDirectory = os.path.join(domainRoot, 'images', imageID)
+ assert os.path.exists(imageDirectory)
+ volumeFile = os.path.join(imageDirectory, volumeID)
+ volumeLease = '%s.lease' % volumeFile
+ volumeMeta = '%s.meta' % volumeFile
+ for path in volumeFile, volumeLease, volumeMeta:
+ logging.info('verifying path: %s' % path)
+ assert os.path.exists(path)
+
+
+class FileBased(base.StorageBackend):
+ def createVolume(self, size):
+ PREALLOCATE = 1
+ result = self.vdsm().createVolume(
+ self.domainID(),
+ self.poolID(),
+ self.imageID(),
+ self.largeIntegerXMLRPCWorkaround(size),
+ storage.volume.RAW_FORMAT,
+ PREALLOCATE,
+ storage.image.DATA_DISK_TYPE,
+ self.volumeID(),
+ self.randomName('volume_description'))
+ taskID = result['uuid']
+ return taskID, self.domainID(), self.imageID(), self.volumeID()
diff --git a/tests/functional/testlib/storagecontexts/iscsi.py b/tests/functional/testlib/storagecontexts/iscsi.py
index 0e891ee..eb19715 100644
--- a/tests/functional/testlib/storagecontexts/iscsi.py
+++ b/tests/functional/testlib/storagecontexts/iscsi.py
@@ -60,11 +60,7 @@
base.StorageBackend.__init__(self)
self._iqn = 'iqn.1970-01.functional.test:%04d' %\
random.randint(1, 10000)
- self._volumeGroup = {'uuid': self.newUUID(), 'vgs_uuid': None}
- self._poolID = self.newUUID()
- self._connectionID = self.newUUID()
- self._imageID = self.newUUID()
- self._volumeID = self.newUUID()
+ self._volumeGroup = {'uuid': self.domainID(), 'vgs_uuid': None}
def _targetcli(self, command):
commandAsList = command.split()
@@ -77,6 +73,16 @@
self._fileioBackstore = self.randomName('backfile')
logging.info('using %s, %s' %
(self._fileioBackstore, self._storageFile))
+ self._setupISCSITarget()
+ return (self,
+ Verify(
+ self._iqn,
+ self._volumeGroup,
+ self.vdsm,
+ self.volumeID())
+ )
+
+ def _setupISCSITarget(self):
self._targetcli('/backstores/fileio create %s %s 10G' %
(self._fileioBackstore, self._storageFile))
self._targetcli('/iscsi create %s' % self._iqn)
@@ -88,13 +94,6 @@
self._targetcli('/iscsi/%s/tpg1 set attribute '
'generate_node_acls=1 '
'cache_dynamic_acls=1' % self._iqn)
- return (self,
- Verify(
- self._iqn,
- self._volumeGroup,
- self.vdsm,
- self._volumeID)
- )
def __exit__(self, *args):
doubleDashed = self._volumeGroup['uuid'].replace('-', '--')
@@ -145,27 +144,13 @@
'user': '',
'tpgt': '1',
'password': '',
- 'id': self._connectionID,
+ 'id': self.connectionID(),
'port': '3260'
}
self.vdsm().disconnectStorageServer(
storage.sd.ISCSI_DOMAIN,
- self._poolID,
+ self.poolID(),
[connection])
-
- def createStoragePool(self):
- POOL_TYPE_DEPRECATED = 0
- self.vdsm().createStoragePool(
- POOL_TYPE_DEPRECATED,
- self._poolID,
- self.randomName('pool'),
- self._domainID(),
- [self._domainID()],
- 1)
- return self._poolID
-
- def _domainID(self):
- return self._volumeGroup['uuid']
def _createVG(self):
lun = self._findLUN()
@@ -194,14 +179,14 @@
def createVolume(self, size):
PREALLOCATE = 1
result = self.vdsm().createVolume(
- self._domainID(),
- self._poolID,
- self._imageID,
+ self.domainID(),
+ self.poolID(),
+ self.imageID(),
self.largeIntegerXMLRPCWorkaround(size),
storage.volume.RAW_FORMAT,
PREALLOCATE,
storage.image.DATA_DISK_TYPE,
- self._volumeID,
+ self.volumeID(),
self.randomName('iscsi_description'))
logging.info('createVolume result: %s' % result)
return result['uuid']
diff --git a/tests/functional/testlib/storagecontexts/localfs.py b/tests/functional/testlib/storagecontexts/localfs.py
index ef341a0..6ba2914 100644
--- a/tests/functional/testlib/storagecontexts/localfs.py
+++ b/tests/functional/testlib/storagecontexts/localfs.py
@@ -5,67 +5,24 @@
import storage.sd
import storage.volume
import storage.image
-from . import base
+from . import filebased
-class Verify(base.Verify):
+class Verify(filebased.Verify):
def __init__(self, directory, vdsm):
- base.Verify.__init__(self, vdsm)
+ filebased.Verify.__init__(self, vdsm)
self._directory = directory
- def storageServerConnected(self):
- self.sleepWhileVDSMCompletesTask(duration=2)
+ def rhevMountPoint(self):
transformedDirectory = self._directory.replace('/', '_')
- expectedSymlink = os.path.join(
+ result = os.path.join(
'/rhev/data-center/mnt/',
transformedDirectory)
- assert os.path.lexists(expectedSymlink)
-
- def storageDomainCreated(self, domainID):
- self.sleepWhileVDSMCompletesTask(duration=1)
- self._directIOTestFileExists()
- domainRoot = os.path.join(self._directory, domainID)
- expectedFiles = [
- 'images',
- 'dom_md',
- 'dom_md/leases',
- 'dom_md/inbox',
- 'dom_md/outbox',
- 'dom_md/metadata',
- 'dom_md/ids']
- expectedFullPaths =\
- [os.path.join(domainRoot, path) for path in expectedFiles]
- for path in expectedFullPaths:
- assert os.path.exists(path)
-
- def _directIOTestFileExists(self):
- self.sleepWhileVDSMCompletesTask(duration=1)
- directIOTestFile = os.path.join(self._directory, '__DIRECT_IO_TEST__')
- assert os.path.exists(directIOTestFile)
-
- def volumeCreated(self, volumeInfo):
- taskID, domainID, imageID, volumeID = volumeInfo
- self.waitUntilVDSMTaskFinished(taskID, 20)
- domain_directory = os.path.join(self._directory, domainID)
- image_directory = os.path.join(domain_directory, 'images', imageID)
- assert os.path.exists(image_directory)
- volume_file = os.path.join(image_directory, volumeID)
- volume_lease = '%s.lease' % volume_file
- volume_meta = '%s.meta' % volume_file
- for path in volume_file, volume_lease, volume_meta:
- assert os.path.exists(path)
+ return result
-class LocalFS(base.StorageBackend):
+class LocalFS(filebased.FileBased):
_NON_EXISTANT_POOL = '00000000-0000-0000-0000-000000000000'
-
- def __init__(self):
- base.StorageBackend.__init__(self)
- self._domainID = self.newUUID()
- self._poolID = self.newUUID()
- self._imageID = self.newUUID()
- self._volumeID = self.newUUID()
- self._connectionID = self.newUUID()
def __enter__(self):
self._createDirectoryForLocalFSStorage()
@@ -87,13 +44,13 @@
'user': '',
'tpgt': '1',
'password': '******',
- 'id': self._connectionID,
+ 'id': self.connectionID(),
'port': ''}
result = self.vdsm().connectStorageServer(
storage.sd.LOCALFS_DOMAIN,
self._NON_EXISTANT_POOL,
[localFilesystemConnection])
- assert result['statuslist'][0]['id'] == self._connectionID
+ assert result['statuslist'][0]['id'] == self.connectionID()
def disconnectStorageServer(self):
localFilesystemConnection = {
@@ -102,46 +59,20 @@
'user': '',
'tpgt': '1',
'password': '******',
- 'id': self._connectionID,
+ 'id': self.connectionID(),
'port': ''}
self.vdsm().disconnectStorageServer(
storage.sd.LOCALFS_DOMAIN,
self._NON_EXISTANT_POOL,
[localFilesystemConnection])
- def createStoragePool(self):
- POOL_TYPE_DEPRECATED = 0
- self.vdsm().createStoragePool(
- POOL_TYPE_DEPRECATED,
- self._poolID,
- self.randomName('pool'),
- self._domainID,
- [self._domainID],
- 1)
- return self._poolID
-
def createStorageDomain(self):
DOMAIN_VERSION = 3
self.vdsm().createStorageDomain(
storage.sd.LOCALFS_DOMAIN,
- self._domainID,
+ self.domainID(),
'some_name',
self._directory,
storage.sd.DATA_DOMAIN,
DOMAIN_VERSION)
- return self._domainID
-
- def createVolume(self, size):
- PREALLOCATE = 1
- result = self.vdsm().createVolume(
- self._domainID,
- self._poolID,
- self._imageID,
- self.largeIntegerXMLRPCWorkaround(size),
- storage.volume.RAW_FORMAT,
- PREALLOCATE,
- storage.image.DATA_DISK_TYPE,
- self._volumeID,
- self.randomName('localfs_description'))
- taskID = result['uuid']
- return taskID, self._domainID, self._imageID, self._volumeID
+ return self.domainID()
diff --git a/tests/functional/testlib/storagecontexts/nfs.py b/tests/functional/testlib/storagecontexts/nfs.py
new file mode 100644
index 0000000..cdde3a1
--- /dev/null
+++ b/tests/functional/testlib/storagecontexts/nfs.py
@@ -0,0 +1,115 @@
+import subprocess
+import tempfile
+import pwd
+import shutil
+import os
+import storage.sd
+from . import filebased
+
+
+class Verify(filebased.Verify):
+ def __init__(self, directory, vdsm):
+ filebased.Verify.__init__(self, vdsm)
+ self._directory = directory
+
+ def rhevMountPoint(self):
+ transformedDirectory = self._directory.replace('/', '_')
+ result = os.path.join(
+ '/rhev/data-center/mnt/',
+ '127.0.0.1:%s' % transformedDirectory)
+ return result
+
+
+class NFS(filebased.FileBased):
+ _NON_EXISTANT_POOL = '00000000-0000-0000-0000-000000000000'
+
+ def __enter__(self):
+ self._setupNFSExport()
+ return self, Verify(self._directory, self.vdsm)
+
+ def _setupNFSExport(self):
+ self._export()
+ self._restartNFSServer()
+ mountPoint = tempfile.mkdtemp()
+ COMMAND = 'sudo --non-interactive mount -t nfs localhost:%s %s' %\
+ (self._directory, mountPoint)
+ subprocess.check_call(COMMAND, shell=True)
+ self._setPermissions(mountPoint)
+ subprocess.check_call(
+ 'sudo --non-interactive umount %s' % mountPoint, shell=True)
+ os.rmdir(mountPoint)
+
+ def _export(self):
+ self._directory = tempfile.mkdtemp('', 'nfstest', '/var/tmp')
+ self._previousExports = open('/etc/exports').read()
+ with open('/etc/exports', 'a') as f:
+ line = '%s 127.0.0.1(rw,sync,no_root_squash,no_all_squash,fsid=0)\n'\
+ % self._directory
+ f.write(line)
+
+ def _unexport(self):
+ self._umount()
+ with open('/etc/exports', 'w') as f:
+ f.write(self._previousExports)
+ self._restartNFSServer()
+
+ def _umount(self):
+ COMMAND = 'sudo --non-interactive umount 127.0.0.1:%s'\
+ % self._directory
+ subprocess.call(COMMAND, shell=True)
+
+ def _setPermissions(self, directory):
+ vdsmUser = pwd.getpwnam('vdsm')
+ os.chown(directory, vdsmUser.pw_uid, vdsmUser.pw_gid)
+ os.chmod(directory, 0755)
+
+ def _restartNFSServer(self):
+ subprocess.check_call(
+ 'sudo --non-interactive service nfs-server restart', shell=True)
+
+ def __exit__(self, *args):
+ self._unexport()
+ shutil.rmtree(self._directory)
+
+ def connectStorageServer(self):
+ connection = {
+ 'connection': self._nfsPath(),
+ 'iqn': '',
+ 'user': '',
+ 'tpgt': '1',
+ 'password': '******',
+ 'id': self.connectionID(),
+ 'port': ''}
+ result = self.vdsm().connectStorageServer(
+ storage.sd.NFS_DOMAIN,
+ self._NON_EXISTANT_POOL,
+ [connection])
+ assert result['statuslist'][0]['id'] == self.connectionID()
+
+ def disconnectStorageServer(self):
+ connection = {
+ 'connection': self._nfsPath(),
+ 'iqn': '',
+ 'user': '',
+ 'tpgt': '1',
+ 'password': '******',
+ 'id': self.connectionID(),
+ 'port': ''}
+ self.vdsm().disconnectStorageServer(
+ storage.sd.NFS_DOMAIN,
+ self._NON_EXISTANT_POOL,
+ [connection])
+
+ def createStorageDomain(self):
+ DOMAIN_VERSION = 3
+ self.vdsm().createStorageDomain(
+ storage.sd.NFS_DOMAIN,
+ self.domainID(),
+ 'some_name',
+ self._nfsPath(),
+ storage.sd.DATA_DOMAIN,
+ DOMAIN_VERSION)
+ return self.domainID()
+
+ def _nfsPath(self):
+ return '127.0.0.1:%s' % self._directory
--
To view, visit http://gerrit.ovirt.org/32990
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I1781fc400c0604855d3143dde22ccb29e6cc8013
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Yoav Kleinberger <ykleinbe(a)redhat.com>
Change in vdsm[master]: tests: new functional tests for vdsm storage
by ykleinbe@redhat.com
Yoav Kleinberger has uploaded a new change for review.
Change subject: tests: new functional tests for vdsm storage
......................................................................
tests: new functional tests for vdsm storage
Ultimately, the purpose of this patch is to replace the existing
tests/functional/storageTests.py, henceforth "the old test".
The old test does not, in fact, verify VDSM behaviour. It only checks
for the return codes that VDSM returns to its caller.
This patch introduces a framework of "test contexts" that is extensible to various
storage backends. Each test context, be it iscsi, nfs or some other
storage type, knows how to tell VDSM to create its particular type of
storage domain, and also knows how to verify that observable actions
(e.g. the creation of a logical volume in an LVM volume group) have
actually been performed.
Currently, only localfs and iscsi are supported. Other storage backends
will be added later.
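The contract between a test and a context is deliberately small: entering the
context yields a (vdsm, verify) pair, the test drives VDSM through the first
and checks observable side effects through the second. A self-contained
sketch of that flow (FakeLocalFS below is illustrative only; the real
contexts talk to a running VDSM):

    import os
    import shutil
    import tempfile


    class Verify(object):
        def __init__(self, directory):
            self._directory = directory

        def storageDomainCreated(self, domainID):
            # check the effect on the host, not the return code from VDSM
            assert os.path.exists(
                os.path.join(self._directory, domainID, 'dom_md'))


    class FakeLocalFS(object):
        # stand-in for a real context such as localfs.LocalFS or iscsi.ISCSI
        def __enter__(self):
            self._directory = tempfile.mkdtemp()
            return self, Verify(self._directory)

        def __exit__(self, *args):
            shutil.rmtree(self._directory)

        def createStorageDomain(self):
            # the real context issues vdsm.createStorageDomain(); here we
            # only fake the observable result so the sketch runs anywhere
            domainID = 'fake-domain'
            os.makedirs(os.path.join(self._directory, domainID, 'dom_md'))
            return domainID


    def _testCreateDomain(storageContext):
        # mirrors TestBasicStorageDomain._testCreateVolume in this patch
        with storageContext() as (vdsm, verify):
            domainID = vdsm.createStorageDomain()
            verify.storageDomainCreated(domainID)


    _testCreateDomain(FakeLocalFS)

Adding a new backend then means providing another such pair, without
touching the test itself.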
Change-Id: I1703e7c1dc223ff707775865cd14c7dd62314caf
Bug-Url: https://bugzilla.redhat.com/??????
Signed-off-by: Yoav Kleinberger <ykleinbe(a)redhat.com>
---
A tests/functional/basicStorageTest.py
A tests/functional/testlib/__init__.py
A tests/functional/testlib/controlvdsm.py
A tests/functional/testlib/testcontexts/__init__.py
A tests/functional/testlib/testcontexts/base.py
A tests/functional/testlib/testcontexts/iscsi.py
A tests/functional/testlib/testcontexts/localfs.py
A tests/run_functional_storage_tests.sh
8 files changed, 459 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/96/32496/1
diff --git a/tests/functional/basicStorageTest.py b/tests/functional/basicStorageTest.py
new file mode 100644
index 0000000..c78e4b2
--- /dev/null
+++ b/tests/functional/basicStorageTest.py
@@ -0,0 +1,42 @@
+import storage.volume
+import storage.image
+import logging
+from testlib import controlvdsm
+from testlib.testcontexts import localfs
+from testlib.testcontexts import iscsi
+
+class TestBasicStorageDomain:
+ @classmethod
+ def setup_class(cls):
+ logging.basicConfig(level=logging.DEBUG, format='%(asctime)s TEST %(levelname)s: %(message)s')
+
+ def setup(self):
+ controlVDSM = controlvdsm.ControlVDSM()
+ controlVDSM.cleanup()
+
+ def testCreateVolumeLocalFS(self):
+ self._testCreateVolume(localfs.LocalFS)
+
+ def testCreateVolumeISCSI(self):
+ self._testCreateVolume(iscsi.ISCSI)
+
+ def _testCreateVolume(self, storageContext):
+ with storageContext() as (vdsm, verify):
+ storageServerID = vdsm.connectStorageServer()
+ verify.storageServerConnected()
+
+ domainID = vdsm.createStorageDomain()
+ verify.storageDomainCreated(domainID)
+
+ poolID = vdsm.createStoragePool()
+ verify.storagePoolCreated(poolID, masterDomainID = domainID)
+
+ vdsm.connectStoragePool(poolID, masterDomainID = domainID)
+ vdsm.spmStart(poolID)
+ verify.spmStarted(poolID)
+
+ vdsm.activateStorageDomain(domainID, poolID)
+ GIGABYTE = 1024 ** 3
+ volumeInfo = vdsm.createVolume(1 * GIGABYTE)
+ verify.volumeCreated(volumeInfo)
+ vdsm.disconnectStorageServer()
diff --git a/tests/functional/testlib/__init__.py b/tests/functional/testlib/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/functional/testlib/__init__.py
diff --git a/tests/functional/testlib/controlvdsm.py b/tests/functional/testlib/controlvdsm.py
new file mode 100644
index 0000000..a983bf3
--- /dev/null
+++ b/tests/functional/testlib/controlvdsm.py
@@ -0,0 +1,53 @@
+import subprocess
+import logging
+import vdsm.vdscli
+import socket
+import vdsm.config
+import time
+
+class ControlVDSM:
+ def cleanup(self):
+ self._stopService()
+ assert not self._serviceRunning()
+ self._brutallyCleanFiles()
+ self._restartService()
+ return self._checkConnection()
+
+ def _checkConnection(self):
+ useSSL = vdsm.config.config.getboolean('vars', 'ssl')
+ vdsmClient = vdsm.vdscli.connect(useSSL=useSSL)
+ RETRIES = 5
+ for _ in range(RETRIES):
+ try:
+ vdsmClient.getStorageDomainsList()
+ logging.info('connection to VDSM succeeded')
+ return
+ except socket.error as e:
+ logging.warning('could not talk to VDSM: %s' % e)
+ time.sleep(1)
+
+ raise Exception('could not connect to VDSM')
+
+ def _stopService(self):
+ self._run("sudo service vdsmd stop")
+
+ def _serviceRunning(self):
+ returnCode = subprocess.call('sudo service vdsmd status', shell=True, stdout=open('/dev/null','w'), stderr=open('/dev/null','w'))
+ logging.info('vdsm running: %s' % (returnCode == 0))
+ return returnCode == 0
+
+ def _restartService(self):
+ self._run("sudo vdsm-tool configure --force")
+ self._run("sudo service vdsmd start")
+
+ def _run(self, command):
+ logging.info('running: %s' % command)
+ returnCode = subprocess.call(command, shell=True, close_fds=True, stdout=open('/dev/null','w'), stderr=open('/dev/null','w'))
+ if returnCode != 0:
+ logging.warning('failure! command was: %s' % command)
+ else:
+ logging.info('finished.')
+
+ def _brutallyCleanFiles(self):
+ logging.warning('removing /rhev/data-center without asking too many questions')
+ self._run('sudo rm -fr /rhev/data-center/*')
diff --git a/tests/functional/testlib/testcontexts/__init__.py b/tests/functional/testlib/testcontexts/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/functional/testlib/testcontexts/__init__.py
diff --git a/tests/functional/testlib/testcontexts/base.py b/tests/functional/testlib/testcontexts/base.py
new file mode 100644
index 0000000..bccebdf
--- /dev/null
+++ b/tests/functional/testlib/testcontexts/base.py
@@ -0,0 +1,101 @@
+import os
+import vdsm.vdscli
+import vdsm.config
+import random
+import uuid
+import logging
+import time
+
+class Verify:
+ def __init__(self, vdsm):
+ self._vdsm = vdsm
+
+ def assertPathExists(self, path, link = False):
+ logging.info('verifying path: %s' % path)
+ if link:
+ assert os.path.lexists(path)
+ else:
+ assert os.path.exists(path)
+
+ def assertPathDoesNotExist(self, path, link = False):
+ if link:
+ assert not os.path.lexists(path)
+ else:
+ assert not os.path.exists(path)
+
+ def waitForVDSMToFinishTask(self, duration = 1):
+ time.sleep(duration)
+
+ def storagePoolCreated(self, poolID, masterDomainID):
+ self.waitForVDSMToFinishTask()
+ linkToDomain = os.path.join('/rhev/data-center', poolID, masterDomainID)
+ masterAliasLink = os.path.join('/rhev/data-center', poolID, 'mastersd')
+ logging.info('verifying domain %s in pool %s' % (masterDomainID, poolID))
+ assert os.path.lexists(linkToDomain)
+ logging.info('verifying symlink to master domain')
+ assert os.path.lexists(masterAliasLink)
+
+ def waitFor(self, timeout, description, predicate, *args, **kwargs):
+ logging.info('waiting for "%s"' % description)
+ start = time.time()
+ for _ in xrange(timeout):
+ if predicate(*args, **kwargs):
+ logging.info('it took %s seconds' % (time.time() - start))
+ return
+ time.sleep(1)
+
+ assert False, 'waited %s seconds for "%s" but it did not happen' % (timeout, description)
+
+ def spmStarted(self, poolID):
+ self.waitForVDSMToFinishTask()
+ masterDomainDirectory = '/rhev/data-center/%s/mastersd' % poolID
+ master = os.path.join(masterDomainDirectory, 'master')
+ tasks = os.path.join(master, 'tasks')
+ vms = os.path.join(master, 'vms')
+ allExist = lambda: ( os.path.exists(master) and os.path.exists(tasks) and os.path.exists(vms) )
+ self.waitFor(60, 'SPM related subdirectories exist', allExist)
+
+ def waitOnVDSMTask(self, taskID, timeout):
+ taskFinished = lambda: (self._taskStatus(taskID)['taskState'] == 'finished')
+ self.waitFor(timeout, 'vdsm task to be finished', taskFinished)
+ taskStatus = self._taskStatus(taskID)
+ assert taskStatus['code'] == 0, taskStatus['message']
+
+ def _taskStatus(self, taskID):
+ result = self._vdsm.getTaskStatus(taskID)
+ assert result['status']['code'] == 0
+ taskStatus = result['taskStatus']
+ return taskStatus
+
+class StorageBackend:
+ def __init__(self):
+ useSSL = vdsm.config.config.getboolean('vars', 'ssl')
+ self._vdsm = vdsm.vdscli.connect(useSSL=useSSL)
+
+ def newUUID(self):
+ return str(uuid.uuid4())
+
+ def randomName(self, base):
+ return "%s_%04d" % (base, random.randint(1,10000))
+
+ def connectStoragePool(self, poolID, masterDomainID):
+ SCSI_KEY_DEPRECATED = 0
+ result = self._vdsm.connectStoragePool(poolID, 1, SCSI_KEY_DEPRECATED, masterDomainID, 1)
+ self.verifyVDSMSuccess(result)
+
+ def spmStart(self, poolID):
+ RECOVERY_MODE_DEPRECATED = 0
+ SCSI_FENCING_DEPRECATED = 0
+ result = self._vdsm.spmStart(poolID, -1, '-1', SCSI_FENCING_DEPRECATED, RECOVERY_MODE_DEPRECATED)
+ self.verifyVDSMSuccess(result)
+
+ def activateStorageDomain(self, domainID, poolID):
+ result = self._vdsm.activateStorageDomain(domainID,poolID)
+ self.verifyVDSMSuccess(result)
+
+ def stringForXMLRPC(self, number):
+ return str(number)
+
+ def verifyVDSMSuccess(self, result):
+ if result[ 'status' ][ 'code' ] != 0:
+ raise Exception('expected OK result from VDSM, got "%s" instead' % str(result))
diff --git a/tests/functional/testlib/testcontexts/iscsi.py b/tests/functional/testlib/testcontexts/iscsi.py
new file mode 100644
index 0000000..96d6958
--- /dev/null
+++ b/tests/functional/testlib/testcontexts/iscsi.py
@@ -0,0 +1,150 @@
+import os
+import random
+import logging
+import tempfile
+import shutil
+import subprocess
+import glob
+import storage.sd
+import storage.volume
+import storage.image
+from . import base
+
+class Verify(base.Verify):
+ def __init__(self, iqn, volumeGroup, vdsm, volumeID):
+ base.Verify.__init__(self, vdsm)
+ self._iqn = iqn
+ self._volumeGroup = volumeGroup
+ self._volumeID = volumeID
+
+ def storageServerConnected(self):
+ targetNameFiles = glob.glob('/sys/devices/platform/host*/session*/iscsi_session/*/targetname')
+ targetNames = [ open(file).read().strip() for file in targetNameFiles ]
+ assert self._iqn in targetNames
+
+ def storageDomainCreated(self, domainID):
+ self.waitFor(10, 'storage domain exists', self._storageDomainVolumesExist, domainID)
+
+ def _storageDomainVolumesExist(self, domainID):
+ for name in [ 'ids', 'inbox', 'leases', 'master', 'metadata', 'outbox' ]:
+ doubleDashed = domainID.replace('-', '--')
+ expectedPath = os.path.join('/dev/mapper', '%s-%s' % (doubleDashed, name))
+ logging.info('verifying: %s' % expectedPath)
+ if not os.path.lexists(expectedPath):
+ return False
+
+ return True
+
+ def volumeCreated(self, taskID):
+ self.waitOnVDSMTask(taskID, 10)
+ result = subprocess.call('sudo lvs %s | grep %s' % (self._volumeGroup['uuid'], self._volumeID), shell = True)
+ assert result == 0, "did not find logical volume in volume group"
+
+class ISCSI(base.StorageBackend):
+ _NULL_UUID = '00000000-0000-0000-0000-000000000000'
+ def __init__(self):
+ base.StorageBackend.__init__(self)
+ self._iqn = 'iqn.1970-01.functional.test:%04d' % random.randint(1,10000)
+ self._volumeGroup = { 'uuid': self.newUUID(), 'vgs_uuid': None }
+ self._poolID = self.newUUID()
+ self._connectionID = self.newUUID()
+ self._imageID = self.newUUID()
+ self._volumeID = self.newUUID()
+ logging.info( 'using the following attributes:' )
+ for name in [ '_volumeGroup', '_iqn', '_poolID', '_connectionID', '_imageID', '_volumeID' ]:
+ logging.info( '%s => %s' % ( name, getattr(self,name) ) )
+
+ def _targetcli(self, command):
+ commandAsList = command.split()
+ logging.info('running targetcli: targetcli %s' % commandAsList)
+ subprocess.check_call( [ 'targetcli' ] + commandAsList, close_fds = True )
+
+ def __enter__(self):
+ self._testDirectory = tempfile.mkdtemp()
+ self._storageFile = os.path.join(self._testDirectory, 'testfile')
+ self._fileioBackstore = self.randomName('backfile')
+ logging.info('using %s, %s' % (self._fileioBackstore, self._storageFile))
+ self._targetcli('/backstores/fileio create %s %s 10G' % (self._fileioBackstore, self._storageFile))
+ self._targetcli('/iscsi create %s' % self._iqn)
+ self._targetcli('/iscsi/%s/tpg1/luns create /backstores/fileio/%s' % (self._iqn, self._fileioBackstore))
+ self._targetcli('/iscsi/%s/tpg1 set attribute authentication=0 demo_mode_write_protect=0' % self._iqn)
+ self._targetcli('/iscsi/%s/tpg1 set attribute generate_node_acls=1 cache_dynamic_acls=1' % self._iqn)
+ return self, Verify(self._iqn, self._volumeGroup, self._vdsm, self._volumeID)
+
+ def __exit__(self, *args):
+ doubleDashed = self._volumeGroup['uuid'].replace('-', '--')
+ mapperDevices = glob.glob('/dev/mapper/%s*' % doubleDashed ) + [ '/dev/mapper/%s' % self._lunGUID ]
+ for device in mapperDevices:
+ logging.info('removing %s' % device)
+ result = subprocess.call('sudo dmsetup remove %s' % device, shell=True)
+ if result != 0:
+ logging.warning('could not remove %s' % device)
+ self._targetcli( '/iscsi delete %s' % self._iqn )
+ self._targetcli( '/backstores/fileio delete %s' % self._fileioBackstore )
+ shutil.rmtree(self._testDirectory)
+
+ def connectStorageServer(self):
+ connection = {'connection': '127.0.0.1', 'iqn': self._iqn, 'user': '', 'tpgt': '1', 'password': '', 'id': self._NULL_UUID, 'port': '3260'}
+ result = self._vdsm.connectStorageServer(
+ storage.sd.ISCSI_DOMAIN,
+ self._NULL_UUID,
+ [ connection ])
+ self.verifyVDSMSuccess(result)
+ connectionDetails = result['statuslist'][0]
+ assert connectionDetails['status'] == 0
+ logging.info( 'got id %s', connectionDetails['id'] )
+
+ def _findLUN(self):
+ result = self._vdsm.getDeviceList(storage.sd.ISCSI_DOMAIN)
+ for device in result['devList']:
+ for path in device['pathlist']:
+ if path['iqn'] == self._iqn:
+ logging.info( 'vdsm reports our LUN %s' % device )
+ return device
+
+ raise Exception("LUN not found!")
+
+ def disconnectStorageServer(self):
+ connection = {'connection': '127.0.0.1', 'iqn': self._iqn, 'user': '', 'tpgt': '1', 'password': '', 'id': self._connectionID, 'port': '3260'}
+ result = self._vdsm.disconnectStorageServer(
+ storage.sd.ISCSI_DOMAIN,
+ self._poolID,
+ [ connection ])
+ self.verifyVDSMSuccess(result)
+
+ def createStoragePool(self):
+ POOL_TYPE_DEPRECATED = 0
+ result = self._vdsm.createStoragePool(POOL_TYPE_DEPRECATED, self._poolID, self.randomName('pool'), self._domainID(), [ self._domainID() ], 1)
+ self.verifyVDSMSuccess(result)
+ return self._poolID
+
+ def _domainID(self):
+ return self._volumeGroup['uuid']
+
+ def _createVG(self):
+ lun = self._findLUN()
+ result = self._vdsm.createVG(self._volumeGroup['uuid'], [ lun['GUID'] ])
+ logging.info('createVG returned %s' % result)
+ self.verifyVDSMSuccess(result)
+ self._volumeGroup[ 'vgs_uuid' ] = result[ 'uuid' ]
+ self._lunGUID = lun['GUID']
+
+ def createStorageDomain(self):
+ self._createVG()
+ self._createStorageDomain()
+ return self._volumeGroup['uuid']
+
+ def _createStorageDomain(self):
+ domainID = self._volumeGroup['uuid']
+ DOMAIN_VERSION = 3
+ domainName = self.randomName( 'some_name' )
+ result = self._vdsm.createStorageDomain(storage.sd.ISCSI_DOMAIN, domainID, domainName, self._volumeGroup['vgs_uuid'], storage.sd.DATA_DOMAIN, DOMAIN_VERSION)
+ self.verifyVDSMSuccess(result)
+
+ def createVolume(self, size):
+ PREALLOCATE = 1
+ result = self._vdsm.createVolume(self._domainID(), self._poolID, self._imageID,
+ self.stringForXMLRPC(size), storage.volume.RAW_FORMAT, PREALLOCATE, storage.image.DATA_DISK_TYPE, self._volumeID, self.randomName('iscsi_description'))
+ logging.info( 'createVolume result: %s' % result)
+ self.verifyVDSMSuccess(result)
+ return result[ 'uuid' ]
diff --git a/tests/functional/testlib/testcontexts/localfs.py b/tests/functional/testlib/testcontexts/localfs.py
new file mode 100644
index 0000000..b498b6e
--- /dev/null
+++ b/tests/functional/testlib/testcontexts/localfs.py
@@ -0,0 +1,104 @@
+import tempfile
+import pwd
+import shutil
+import os
+import storage.sd
+import storage.volume
+import storage.image
+from . import base
+
+class Verify(base.Verify):
+ def __init__(self, directory, vdsm):
+ base.Verify.__init__(self, vdsm)
+ self._directory = directory
+
+ def storageServerConnected(self):
+ self.waitForVDSMToFinishTask(2)
+ transformedDirectory = self._directory.replace( '/', '_' )
+ expectedSymlink = os.path.join('/rhev/data-center/mnt/', transformedDirectory)
+ self.assertPathExists(expectedSymlink, link = True)
+
+ def storageDomainCreated(self, domainID):
+ self.waitForVDSMToFinishTask()
+ self._directIOTestFileExists()
+ domainRoot = os.path.join(self._directory, domainID)
+ expectedFiles = [ 'images', 'dom_md', 'dom_md/leases', 'dom_md/inbox', 'dom_md/outbox', 'dom_md/metadata', 'dom_md/ids' ]
+ expectedFullPaths = [ os.path.join(domainRoot, path) for path in expectedFiles ]
+ for path in expectedFullPaths:
+ assert os.path.exists(path)
+
+ def _directIOTestFileExists(self):
+ self.waitForVDSMToFinishTask()
+ directIOTestFile = os.path.join(self._directory, '__DIRECT_IO_TEST__')
+ self.assertPathExists(directIOTestFile)
+
+ def volumeCreated(self, volumeInfo):
+ taskID, domainID, imageID, volumeID = volumeInfo
+ self.waitOnVDSMTask(taskID, 20)
+ domain_directory = os.path.join(self._directory, domainID)
+ image_directory = os.path.join(domain_directory, 'images', imageID)
+ self.assertPathExists(image_directory)
+ volume_file = os.path.join(image_directory, volumeID)
+ volume_lease = '%s.lease' % volume_file
+ volume_meta = '%s.meta' % volume_file
+ for path in volume_file, volume_lease, volume_meta:
+ self.assertPathExists(path)
+
+class LocalFS(base.StorageBackend):
+ _NON_EXISTANT_POOL = '00000000-0000-0000-0000-000000000000'
+ def __init__(self):
+ base.StorageBackend.__init__(self)
+ self._domainID = self.newUUID()
+ self._poolID = self.newUUID()
+ self._imageID = self.newUUID()
+ self._volumeID = self.newUUID()
+ self._connectionID = self.newUUID()
+
+ def __enter__(self):
+ self._createDirectoryForLocalFSStorage()
+ return self, Verify(self._directory, self._vdsm)
+
+ def _createDirectoryForLocalFSStorage(self):
+ self._directory = tempfile.mkdtemp('', 'localfstest', '/var/tmp')
+ vdsm_user = pwd.getpwnam('vdsm')
+ os.chown(self._directory, vdsm_user.pw_uid, vdsm_user.pw_gid)
+ os.chmod(self._directory, 0755)
+
+ def __exit__(self, *args):
+ shutil.rmtree(self._directory)
+
+ def connectStorageServer(self):
+ localFilesystemConnection = {'connection': self._directory, 'iqn': '', 'user': '', 'tpgt': '1', 'password': '******', 'id': self._connectionID, 'port': ''}
+ result = self._vdsm.connectStorageServer(
+ storage.sd.LOCALFS_DOMAIN,
+ self._NON_EXISTANT_POOL,
+ [ localFilesystemConnection ])
+ self.verifyVDSMSuccess(result)
+ assert result[ 'statuslist' ][ 0 ][ 'id' ] == self._connectionID
+
+ def disconnectStorageServer(self):
+ localFilesystemConnection = {'connection': self._directory, 'iqn': '', 'user': '', 'tpgt': '1', 'password': '******', 'id': self._connectionID, 'port': ''}
+ result = self._vdsm.disconnectStorageServer(
+ storage.sd.LOCALFS_DOMAIN,
+ self._NON_EXISTANT_POOL,
+ [ localFilesystemConnection ])
+ self.verifyVDSMSuccess(result)
+
+ def createStoragePool(self):
+ POOL_TYPE_DEPRECATED = 0
+ result = self._vdsm.createStoragePool(POOL_TYPE_DEPRECATED, self._poolID, self.randomName('pool'), self._domainID, [ self._domainID ], 1)
+ self.verifyVDSMSuccess(result)
+ return self._poolID
+
+ def createStorageDomain(self):
+ DOMAIN_VERSION = 3
+ result = self._vdsm.createStorageDomain(storage.sd.LOCALFS_DOMAIN, self._domainID, 'some_name', self._directory, storage.sd.DATA_DOMAIN, DOMAIN_VERSION)
+ self.verifyVDSMSuccess(result)
+ return self._domainID
+
+ def createVolume(self, size):
+ PREALLOCATE = 1
+ result = self._vdsm.createVolume(self._domainID, self._poolID, self._imageID, self.stringForXMLRPC(size), storage.volume.RAW_FORMAT, PREALLOCATE, storage.image.DATA_DISK_TYPE, self._volumeID, self.randomName('localfs_description'))
+ self.verifyVDSMSuccess(result)
+ taskID = result[ 'uuid' ]
+ return taskID, self._domainID, self._imageID, self._volumeID
diff --git a/tests/run_functional_storage_tests.sh b/tests/run_functional_storage_tests.sh
new file mode 100755
index 0000000..cb544ee
--- /dev/null
+++ b/tests/run_functional_storage_tests.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+if [[ "$1" = "--help" ]]; then
+ echo "run_functional_storage_tests.sh [--verbose]"
+ exit -1
+fi
+if [[ "$1" = "--verbose" ]]; then
+ verbose="--nologcapture"
+fi
+sudo PYTHONPATH=../lib:../vdsm:functional nosetests -s $verbose functional/basicStorageTest.py
--
To view, visit http://gerrit.ovirt.org/32496
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I1703e7c1dc223ff707775865cd14c7dd62314caf
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Yoav Kleinberger <ykleinbe(a)redhat.com>
Change in vdsm[master]: tests: alignmentScanTests.py is a functional test
by ykleinbe@redhat.com
Yoav Kleinberger has uploaded a new change for review.
Change subject: tests: alignmentScanTests.py is a functional test
......................................................................
tests: alignmentScanTests.py is a functional test
Since alignmentScanTests invokes actual storage code, and does not mock
it out, it should be categorized as a functional test.
Change-Id: I4e20d17c3ebee1203bb5a721ce44d5867570ce8e
Signed-off-by: Yoav Kleinberger <ykleinbe(a)redhat.com>
---
M tests/Makefile.am
M tests/functional/Makefile.am
R tests/functional/alignmentScanTests.py
3 files changed, 1 insertion(+), 1 deletion(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/45/29745/1
diff --git a/tests/Makefile.am b/tests/Makefile.am
index 4ef8f7d..f7f4f97 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -23,7 +23,6 @@
SUBDIRS = functional
test_modules = \
- alignmentScanTests.py \
blocksdTests.py \
bridgeTests.py \
cPopenTests.py \
diff --git a/tests/functional/Makefile.am b/tests/functional/Makefile.am
index b87fdf0..4fc2cc8 100644
--- a/tests/functional/Makefile.am
+++ b/tests/functional/Makefile.am
@@ -33,6 +33,7 @@
vmRecoveryTests.py \
storageTests.py \
veth.py \
+ alignmentScanTests.py \
$(NULL)
dist_vdsmfunctests_DATA = \
diff --git a/tests/alignmentScanTests.py b/tests/functional/alignmentScanTests.py
similarity index 100%
rename from tests/alignmentScanTests.py
rename to tests/functional/alignmentScanTests.py
--
To view, visit http://gerrit.ovirt.org/29745
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I4e20d17c3ebee1203bb5a721ce44d5867570ce8e
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Yoav Kleinberger <ykleinbe(a)redhat.com>
Change in vdsm[master]: tests: use 'localhost' explicitly in test
by ykleinbe@redhat.com
Yoav Kleinberger has uploaded a new change for review.
Change subject: tests: use 'localhost' explicitly in test
......................................................................
tests: use 'localhost' explicitly in test
Previously, tests could fail on some machines (in case the machine has a
non-default hostname). Now, since we use 'localhost' explicitly, this
will not happen.
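As an illustration only (not part of the patch), the explicit address is built
from the same configuration values the test already reads, roughly like this:

    from vdsm import vdscli
    from vdsm.config import config

    # compose an explicit localhost address instead of relying on the hostname
    address = 'localhost:%s' % config.get('addresses', 'management_port')
    server = vdscli.connect(hostPort=address,
                            useSSL=config.getboolean('vars', 'ssl'))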
Change-Id: I89990cff46e64120262e250eee9238b49c4edee4
Signed-off-by: Yoav Kleinberger <ykleinbe(a)redhat.com>
---
M tests/functional/storageTests.py
1 file changed, 2 insertions(+), 1 deletion(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/07/28107/1
diff --git a/tests/functional/storageTests.py b/tests/functional/storageTests.py
index 80ba312..76ca91d 100644
--- a/tests/functional/storageTests.py
+++ b/tests/functional/storageTests.py
@@ -79,7 +79,8 @@
isSSL = config.getboolean('vars', 'ssl')
if isSSL and os.geteuid() != 0:
raise SkipTest("Must be root to use SSL connection to server")
- self.s = vdscli.connect(useSSL=isSSL)
+ address = 'localhost:%s' % config.get('addresses', 'management_port')
+ self.s = vdscli.connect(hostPort=address, useSSL=isSSL)
def assertVdsOK(self, vdsResult):
# code == 0 means OK
--
To view, visit http://gerrit.ovirt.org/28107
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I89990cff46e64120262e250eee9238b49c4edee4
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Yoav Kleinberger <ykleinbe(a)redhat.com>
Change in vdsm[master]: multipath: Move all calls to multipath exe to a single method
by smizrahi@redhat.com
Saggi Mizrahi has uploaded a new change for review.
Change subject: multipath: Move all calls to multipath exe to a single method
......................................................................
multipath: Move all calls to multipath exe to a single method
This makes the code a bit cleaner
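As a sketch of the intended call pattern (mirroring the patch; misc and
constants are the modules already imported by vdsm/storage/multipath.py), the
helper takes the extra multipath arguments as a list:

    def _runCmd(args):
        # prepend the multipath executable and run the command via sudo
        return misc.execCmd([constants.EXT_MULTIPATH] + args, sudo=True)

    _runCmd(["-r"])  # rescan paths
    _runCmd(["-F"])  # flush all unused multipath device maps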
Change-Id: I52afc07a07a925ed7572eb369deb7c203edb04cd
Signed-off-by: Saggi Mizrahi <smizrahi(a)redhat.com>
---
M vdsm/storage/multipath.py
1 file changed, 11 insertions(+), 4 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/55/19255/1
diff --git a/vdsm/storage/multipath.py b/vdsm/storage/multipath.py
index 924d747..c31b5c3 100644
--- a/vdsm/storage/multipath.py
+++ b/vdsm/storage/multipath.py
@@ -94,6 +94,10 @@
)
+def _runCmd(args):
+ return misc.execCmd([constants.EXT_MULTIPATH] + args, sudo=True)
+
+
def rescan():
"""
Forces multipath daemon to rescan the list of available devices and
@@ -108,8 +112,8 @@
supervdsm.getProxy().forceScsiScan()
# Now let multipath daemon pick up new devices
- cmd = [constants.EXT_MULTIPATH, "-r"]
- misc.execCmd(cmd, sudo=True)
+
+    _runCmd(["-r"])
def isEnabled():
@@ -154,6 +158,10 @@
return False
+def flushAll():
+    _runCmd(["-F"])
+
+
def setupMultipath():
"""
Set up the multipath daemon configuration to the known and
@@ -173,8 +181,7 @@
raise se.MultipathSetupError()
misc.persistFile(MPATH_CONF)
- # Flush all unused multipath device maps
- misc.execCmd([constants.EXT_MULTIPATH, "-F"], sudo=True)
+ flushAll()
cmd = [constants.EXT_VDSM_TOOL, "service-reload", "multipathd"]
rc = misc.execCmd(cmd, sudo=True)[0]
--
To view, visit http://gerrit.ovirt.org/19255
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I52afc07a07a925ed7572eb369deb7c203edb04cd
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Saggi Mizrahi <smizrahi(a)redhat.com>
Change in vdsm[master]: [WIP]Java Bindings: Proton support in Java Bindings
by smizrahi@redhat.com
Saggi Mizrahi has uploaded a new change for review.
Change subject: [WIP]Java Bindings: Proton support in Java Bindings
......................................................................
[WIP]Java Bindings: Proton support in Java Bindings
Change-Id: I94c52e118cb63d7df84b89a9b93da7b9e477be91
Signed-off-by: Saggi Mizrahi <smizrahi(a)redhat.com>
---
A client/java/vdsm-json-rpc/src/main/java/org/ovirt/vdsm/reactors/ProtonAuthenticator.java
A client/java/vdsm-json-rpc/src/main/java/org/ovirt/vdsm/reactors/ProtonClient.java
A client/java/vdsm-json-rpc/src/main/java/org/ovirt/vdsm/reactors/ProtonListener.java
A client/java/vdsm-json-rpc/src/main/java/org/ovirt/vdsm/reactors/ProtonReactor.java
A client/java/vdsm-json-rpc/src/test/java/org/ovirt/vdsm/jsonrpc/AmqpReactorTestHelper.java
A client/java/vdsm-json-rpc/src/test/java/org/ovirt/vdsm/jsonrpc/TestJsonRpcClientAMQP.java
6 files changed, 844 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/28/15428/1
diff --git a/client/java/vdsm-json-rpc/src/main/java/org/ovirt/vdsm/reactors/ProtonAuthenticator.java b/client/java/vdsm-json-rpc/src/main/java/org/ovirt/vdsm/reactors/ProtonAuthenticator.java
new file mode 100644
index 0000000..35c9099
--- /dev/null
+++ b/client/java/vdsm-json-rpc/src/main/java/org/ovirt/vdsm/reactors/ProtonAuthenticator.java
@@ -0,0 +1,98 @@
+package org.ovirt.vdsm.reactors;
+
+import java.io.IOException;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import org.apache.qpid.proton.driver.Connector;
+import org.apache.qpid.proton.engine.Sasl;
+import org.apache.qpid.proton.engine.Sasl.SaslOutcome;
+import org.apache.qpid.proton.engine.Sasl.SaslState;
+import org.ovirt.vdsm.reactors.ProtonAuthenticator.AuthenticatorType;
+
+public final class ProtonAuthenticator {
+
+ public enum AuthenticatorType {
+
+ SERVER, CLIENT
+ }
+
+ public enum ConnectionState {
+
+ AUTHENTICATING, CONNECTED, FAILED
+ }
+ private ConnectionState _state;
+ final private AuthenticatorType _authType;
+ final private Connector<?> _connector;
+
+ public ProtonAuthenticator(Connector<?> connector,
+ AuthenticatorType authType) {
+ _authType = authType;
+ setState(ConnectionState.AUTHENTICATING);
+ _connector = connector;
+ final Sasl sasl = _connector.sasl();
+ if (authType == AuthenticatorType.CLIENT) {
+ sasl.setMechanisms(new String[]{"ANONYMOUS"});
+ sasl.client();
+ }
+ }
+
+ private void setState(ConnectionState state) {
+ _state = state;
+ }
+
+ public ConnectionState getState() {
+ return _state;
+ }
+
+ public void authenticate() {
+ final Sasl sasl = _connector.sasl();
+
+ while (true) {
+ try {
+ this._connector.process();
+ } catch (IOException ex) {
+ return;
+ }
+ final SaslState state = sasl.getState();
+ switch (state) {
+ case PN_SASL_CONF:
+ if (_authType == AuthenticatorType.SERVER) {
+ sasl.setMechanisms(new String[]{"ANONYMOUS"});
+ sasl.server();
+ }
+ break;
+ case PN_SASL_STEP:
+ if (_authType == AuthenticatorType.SERVER) {
+ final String[] mechs = sasl.getRemoteMechanisms();
+ if (mechs.length < 1) {
+ sasl.done(SaslOutcome.PN_SASL_AUTH);
+ break;
+ }
+
+ final String mech = mechs[0];
+ if (mech.equals("ANONYMOUS")) {
+ sasl.done(SaslOutcome.PN_SASL_OK);
+ } else {
+ sasl.done(SaslOutcome.PN_SASL_AUTH);
+ }
+ }
+ return;
+ case PN_SASL_PASS:
+ this.setState(ConnectionState.CONNECTED);
+ return;
+ case PN_SASL_FAIL:
+ this.setState(ConnectionState.FAILED);
+ return;
+ case PN_SASL_IDLE:
+
+ break;
+ default:
+ return;
+ }
+ }
+ }
+
+ public AuthenticatorType getAuthType() {
+ return _authType;
+ }
+}
diff --git a/client/java/vdsm-json-rpc/src/main/java/org/ovirt/vdsm/reactors/ProtonClient.java b/client/java/vdsm-json-rpc/src/main/java/org/ovirt/vdsm/reactors/ProtonClient.java
new file mode 100644
index 0000000..4baffbf
--- /dev/null
+++ b/client/java/vdsm-json-rpc/src/main/java/org/ovirt/vdsm/reactors/ProtonClient.java
@@ -0,0 +1,224 @@
+package org.ovirt.vdsm.reactors;
+
+import java.nio.ByteBuffer;
+import java.util.Calendar;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.Future;
+
+import javax.swing.event.EventListenerList;
+
+import org.apache.qpid.proton.amqp.Binary;
+import org.apache.qpid.proton.amqp.messaging.Data;
+import org.apache.qpid.proton.amqp.messaging.Section;
+import org.apache.qpid.proton.engine.Delivery;
+import org.apache.qpid.proton.engine.EndpointState;
+import org.apache.qpid.proton.engine.Link;
+import org.apache.qpid.proton.engine.Receiver;
+import org.apache.qpid.proton.engine.Sender;
+import org.apache.qpid.proton.engine.Session;
+import org.apache.qpid.proton.message.Message;
+import org.apache.qpid.proton.message.MessageFactory;
+import org.apache.qpid.proton.message.impl.MessageFactoryImpl;
+
+public final class ProtonClient implements ReactorClient {
+ private final ProtonReactor _reactor;
+ final private Session _ssn;
+
+ private Sender _sender;
+ private Receiver _receiver;
+
+ private final int _CREDIT = 10;
+ private final ConcurrentLinkedQueue<ByteBuffer> _outbox;
+ private final EventListenerList _eventListeners;
+ private final int _deliveryTimeoutSec;
+ private final MessageFactory _msgFactory;
+
+ public ProtonClient(ProtonReactor reactor, Session session) {
+ _ssn = session;
+ _sender = null;
+ _receiver = null;
+ _outbox = new ConcurrentLinkedQueue<>();
+ _eventListeners = new EventListenerList();
+ _deliveryTimeoutSec = 60 * 3;
+ _reactor = reactor;
+ _msgFactory = new MessageFactoryImpl();
+ }
+
+ @Override
+ public void addEventListener(EventListener el) {
+ synchronized (_eventListeners) {
+ _eventListeners.add(EventListener.class, el);
+ }
+ }
+
+ @Override
+ public void removeEventListener(EventListener el) {
+ synchronized (_eventListeners) {
+ _eventListeners.remove(EventListener.class, el);
+ }
+ }
+
+ private void emitOnMessageReceived(ByteBuffer message) {
+ synchronized (_eventListeners) {
+ final Class<EventListener> cls = EventListener.class;
+ final EventListener[] els = _eventListeners.getListeners(cls);
+ for (EventListener el : els) {
+ el.onMessageReceived(this, message);
+ }
+ }
+ }
+
+ @Override
+ public void sendMessage(ByteBuffer message) {
+ _outbox.add(message);
+ _reactor.wakeup();
+ }
+
+ public void addLink(Link link) {
+ assert (link.getSession().equals(_ssn));
+
+ if (link instanceof Sender) {
+ if (_sender != null) {
+ // already have a sender
+ link.close();
+ return;
+ }
+
+ _sender = (Sender) link;
+ } else {
+ assert (link instanceof Receiver);
+ if (_receiver != null) {
+ // already have a receiver
+ link.close();
+ return;
+ }
+
+ _receiver = (Receiver) link;
+ _receiver.flow(_CREDIT);
+ }
+ link.open();
+ }
+
+ private Message _popOutgoingMessage() {
+ final ByteBuffer data = _outbox.poll();
+ if (data == null) {
+ return null;
+ }
+
+ final Section body = new Data(Binary.create(data));
+ final Message msg = _msgFactory.createMessage();
+ msg.setBody(body);
+ msg.setAddress(_sender.getTarget().toString());
+ return msg;
+ }
+
+ public void queueDeliveries() {
+ if (_sender == null) {
+ final String uuid = UUID.randomUUID().toString();
+ _sender = _ssn.sender("Sender-" + uuid);
+ }
+
+ while (_sender.getCredit() > 0) {
+ final Message m = _popOutgoingMessage();
+ if (m == null) {
+ return;
+ }
+
+ final String uuid = UUID.randomUUID().toString();
+ final Delivery d = _sender
+ .delivery(("outgoing-" + uuid).getBytes());
+ d.setContext(m);
+ }
+ }
+
+ public void processDelivery(Delivery delivery) {
+ assert (_ssn.equals(delivery.getLink().getSession()));
+
+ if (delivery.isReadable()) {
+ _processIncomingDelivery(delivery);
+ } else {
+ assert (delivery.isWritable());
+ _processOutgoingDelivery(delivery);
+ }
+ }
+
+ private void _processOutgoingDelivery(Delivery delivery) {
+ final Sender link = (Sender) delivery.getLink();
+ assert (link.equals(_sender));
+
+ final Message msg = (Message) delivery.getContext();
+ // TBD: Buffer can be reused forever. Change in case of
+ // performance issues.
+ ByteBuffer buff;
+ int i = 1;
+ int written = 0;
+ do {
+ buff = ByteBuffer.allocate(i * 4096);
+ written = msg.encode(buff.array(), 0, buff.capacity());
+ i++;
+ } while (written == buff.capacity());
+
+ link.send(buff.array(), 0, written);
+ if (link.advance()) {
+ // Attach timeout to the delivery
+ final Calendar calendar = Calendar.getInstance();
+ calendar.add(Calendar.SECOND, _deliveryTimeoutSec);
+ delivery.setContext(calendar);
+ }
+ }
+
+ private void _processIncomingDelivery(Delivery delivery) {
+ int total = 0;
+ int read = 0;
+ ByteBuffer buff = ByteBuffer.allocate(4096);
+
+ while (read >= 0) {
+ total += read;
+ if (total >= buff.capacity()) {
+ final ByteBuffer buff2 = ByteBuffer
+ .allocate(buff.capacity() * 2);
+ buff2.put(buff);
+ buff = buff2;
+ }
+ read = _receiver.recv(buff.array(), total, buff.capacity() - total);
+ }
+
+ final Message msg = _msgFactory.createMessage();
+ msg.decode(buff.array(), 0, total);
+
+ assert (msg.getBody() instanceof Data);
+ final Data body = (Data) msg.getBody();
+ final ByteBuffer bb = body.getValue().asByteBuffer();
+ delivery.settle();
+ emitOnMessageReceived(bb);
+ }
+
+ @Override
+ public Future<Void> close() {
+ final Session ssn = _ssn;
+ return _reactor.queueOperation(new Callable<Void>() {
+ @Override
+ public Void call() {
+ ssn.close();
+ return null;
+ }
+ });
+ }
+
+ @Override
+ public boolean closed() {
+ return _ssn.getLocalState().equals(EndpointState.CLOSED);
+ }
+
+ public void removeLink(Link link) {
+ if (link.equals(_sender)) {
+ _sender = null;
+ } else {
+ assert (link.equals(_receiver));
+ _receiver = null;
+ }
+ link.close();
+ }
+}
diff --git a/client/java/vdsm-json-rpc/src/main/java/org/ovirt/vdsm/reactors/ProtonListener.java b/client/java/vdsm-json-rpc/src/main/java/org/ovirt/vdsm/reactors/ProtonListener.java
new file mode 100644
index 0000000..35896f4
--- /dev/null
+++ b/client/java/vdsm-json-rpc/src/main/java/org/ovirt/vdsm/reactors/ProtonListener.java
@@ -0,0 +1,42 @@
+package org.ovirt.vdsm.reactors;
+
+import java.io.IOException;
+import java.util.concurrent.Callable;
+import java.util.concurrent.Future;
+
+import org.apache.qpid.proton.driver.Listener;
+
+public final class ProtonListener implements ReactorListener {
+ private final EventListener _eventListener;
+ private Listener<ProtonListener> _listener;
+ private final ProtonReactor _reactor;
+
+ public ProtonListener(ProtonReactor reactor, EventListener eventListener) {
+ _eventListener = eventListener;
+ _reactor = reactor;
+ }
+
+ public void setListener(Listener<ProtonListener> l) {
+ _listener = l;
+ }
+
+ public void accept(ReactorClient client) {
+ _eventListener.onAcccept(this, client);
+ }
+
+ @Override
+ public Future<Void> close() {
+ final Listener<ProtonListener> l = _listener;
+ return _reactor.queueOperation(new Callable<Void>() {
+ @Override
+ public Void call() {
+ try {
+ l.close();
+ } catch (IOException e) {
+ // already closed
+ }
+ return null;
+ }
+ });
+ }
+}
diff --git a/client/java/vdsm-json-rpc/src/main/java/org/ovirt/vdsm/reactors/ProtonReactor.java b/client/java/vdsm-json-rpc/src/main/java/org/ovirt/vdsm/reactors/ProtonReactor.java
new file mode 100644
index 0000000..b5a38b4
--- /dev/null
+++ b/client/java/vdsm-json-rpc/src/main/java/org/ovirt/vdsm/reactors/ProtonReactor.java
@@ -0,0 +1,452 @@
+package org.ovirt.vdsm.reactors;
+
+import java.io.IOException;
+import java.util.Calendar;
+import java.util.EnumSet;
+import java.util.Iterator;
+import java.util.concurrent.Callable;
+import java.util.concurrent.Future;
+import java.util.concurrent.FutureTask;
+
+import org.apache.qpid.proton.driver.Connector;
+import org.apache.qpid.proton.driver.Driver;
+import org.apache.qpid.proton.driver.Listener;
+import org.apache.qpid.proton.driver.impl.DriverFactoryImpl;
+import org.apache.qpid.proton.engine.Connection;
+import org.apache.qpid.proton.engine.Delivery;
+import org.apache.qpid.proton.engine.EndpointState;
+import org.apache.qpid.proton.engine.EngineFactory;
+import org.apache.qpid.proton.engine.Link;
+import org.apache.qpid.proton.engine.Receiver;
+import org.apache.qpid.proton.engine.Session;
+import org.apache.qpid.proton.engine.impl.EngineFactoryImpl;
+import org.ovirt.vdsm.reactors.ProtonAuthenticator.AuthenticatorType;
+import org.ovirt.vdsm.reactors.ProtonAuthenticator.ConnectionState;
+import org.ovirt.vdsm.util.ChainedOperation;
+import org.ovirt.vdsm.util.ReactorScheduler;
+
+public final class ProtonReactor implements Reactor {
+
+ private final Driver _driver;
+ private final ReactorScheduler _scheduler;
+ private boolean _isRunning;
+ final Object _syncRoot = new Object();
+ final ProtonReactor reactor = this;
+ private EngineFactory _engineFactory;
+
+ public boolean isRunning() {
+ return _isRunning;
+ }
+
+ public ProtonReactor() throws IOException {
+ _engineFactory = new EngineFactoryImpl();
+ _driver = new DriverFactoryImpl().createDriver();
+ _isRunning = false;
+ _scheduler = new ReactorScheduler();
+ }
+
+ @Override
+ public void finalize() throws Throwable {
+ try {
+ _driver.destroy();
+ } finally {
+ super.finalize();
+ }
+ }
+
+ // Creates a listener, returns null if failed to bind or reactor is not
+ // running;
+ @Override
+ public Future<ReactorListener> createListener(final String host,
+ final int port,
+ final ReactorListener.EventListener eventListener) {
+
+ return queueOperation(new Callable<ReactorListener>() {
+ @Override
+ public ProtonListener call() {
+
+ final ProtonListener listener = new ProtonListener(reactor, eventListener);
+ final Listener<ProtonListener> l = _driver.createListener(host,
+ port, listener);
+
+ listener.setListener(l);
+
+ if (l == null) {
+ return null;
+ }
+
+ return listener;
+ }
+ });
+ }
+
+ @Override
+ public Future<ReactorClient> createClient(final String host, final int port) {
+ final Driver driver = _driver;
+ final EngineFactory engineFactory = _engineFactory;
+
+ return queueOperation(new ChainedOperation.Operation<ReactorClient>() {
+ final private int _INIT = 1;
+ final private int _AUTHENTICATE = 2;
+ final private int _DONE = 3;
+ private int _state;
+ final private Driver _driver;
+ final private ProtonReactor _reactor;
+ private Connector<ProtonAuthenticator> _connector;
+ private ProtonAuthenticator _auth;
+ private boolean _done;
+ private boolean _cancelled;
+ private ReactorClient _result;
+ private EngineFactory _engineFactory;
+
+ {
+ _driver = driver;
+ _reactor = reactor;
+ _state = _INIT;
+ _done = false;
+ _cancelled = false;
+ _engineFactory = engineFactory;
+ }
+
+ @Override
+ public void call(final boolean cancelled) {
+ switch (_state) {
+ case _INIT:
+ if (cancelled) {
+ _cancelled = true;
+ _done = true;
+ return;
+ }
+
+ _connector = this._driver.createConnector(host, port, null);
+
+ final Connection connection = engineFactory.createConnection();
+ _connector.setConnection(connection);
+ _auth = new ProtonAuthenticator(_connector,
+ AuthenticatorType.CLIENT);
+ _connector.setContext(_auth);
+ connection.open();
+ _state = _AUTHENTICATE;
+ case _AUTHENTICATE:
+ if (cancelled) {
+ _cancelled = true;
+ _close();
+ return;
+ }
+
+ switch (_auth.getState()) {
+ case AUTHENTICATING:
+ _auth.authenticate();
+ try {
+ _connector.process();
+ } catch (IOException e) {
+ // ignore
+ }
+ return;
+ case FAILED:
+ _close();
+ return;
+ case CONNECTED:
+ // Success !
+ break;
+ }
+
+ Session ssn = _connector.getConnection().session();
+ ssn.open();
+ _result = new ProtonClient(_reactor, ssn);
+ ssn.setContext(_result);
+ _done = true;
+ _state = _DONE;
+ }
+ }
+
+ private void _close() {
+ _connector.getConnection().close();
+ _connector.close();
+ _done = true;
+ _result = null;
+ }
+
+ @Override
+ public boolean isDone() {
+ return _done;
+ }
+
+ @Override
+ public boolean isCancelled() {
+ return _cancelled;
+ }
+
+ @Override
+ public ReactorClient getResult() {
+ return _result;
+ }
+ });
+ }
+
+ // Queues operation to be run in the serving loop.
+ public <T> Future<T> queueOperation(Callable<T> cb) {
+ final FutureTask<T> task = new FutureTask<>(cb);
+ _queueFuture(task);
+ return task;
+ }
+
+ public <T> Future<T> queueOperation(ChainedOperation.Operation<T> op) {
+ final ChainedOperation<T> task = new ChainedOperation<>(op);
+ _queueFuture(task);
+ return task;
+ }
+
+ private void _queueFuture(Future<?> op) {
+ synchronized (_scheduler) {
+ _scheduler.queueFuture(op);
+ wakeup();
+ }
+ }
+
+ private void _waitEvents() {
+ _driver.doWait(0);
+ }
+
+ public void wakeup() {
+ _driver.wakeup();
+ }
+
+ @Override
+ public void serve() {
+ synchronized (_syncRoot) {
+ _isRunning = true;
+ }
+
+ while (_isRunning) {
+ //_waitEvents();
+ synchronized (_scheduler) {
+ _scheduler.performPendingOperations();
+ }
+ _acceptConnectionRequests();
+ _processConnectors();
+ }
+ }
+
+ private void _processConnectors() {
+ for (Connector<?> connector = _driver.connector(); connector != null; connector = _driver
+ .connector()) {
+ if (connector.isClosed()) {
+ connector.destroy();
+ continue;
+ }
+
+ try {
+ connector.process();
+ } catch (IOException e) {
+ continue;
+ }
+
+ final Object ctx = connector.getContext();
+ assert (ctx instanceof ProtonAuthenticator);
+
+ if (ctx instanceof ProtonAuthenticator) {
+ final ProtonAuthenticator auth = (ProtonAuthenticator) ctx;
+ ConnectionState cs = auth.getState();
+ if (cs.equals(ConnectionState.AUTHENTICATING)) {
+ auth.authenticate();
+ cs = auth.getState();
+ }
+
+ if (cs.equals(ConnectionState.CONNECTED)) {
+ if (connector.getConnection() == null) {
+ connector.setConnection(_engineFactory.createConnection());
+ }
+ _processConnector(connector);
+ }
+ }
+
+ try {
+ connector.process();
+ } catch (IOException e) {
+ continue;
+ }
+ }
+ }
+
+ private void _processConnector(Connector<?> connector) {
+ _initConnection(connector);
+ _openPendingSessions(connector);
+ _openLinks(connector);
+ _queueOutgoingDeliveries(connector);
+ _processDeliveries(connector);
+ _cleanDeliveries(connector);
+ _cleanLinks(connector);
+ _cleanSessions(connector);
+ }
+
+ private void _cleanSessions(Connector<?> connector) {
+ final Connection conn = connector.getConnection();
+ final EnumSet<EndpointState> localState = EnumSet
+ .of(EndpointState.ACTIVE);
+ final EnumSet<EndpointState> remoteState = EnumSet
+ .of(EndpointState.CLOSED);
+
+        for (Session ssn = conn.sessionHead(localState, remoteState); ssn != null; ssn = ssn
+                .next(localState, remoteState)) {
+
+ ssn.close();
+ }
+ }
+
+ private void _cleanLinks(Connector<?> connector) {
+ final Connection conn = connector.getConnection();
+ final EnumSet<EndpointState> localState = EnumSet
+ .of(EndpointState.ACTIVE);
+ final EnumSet<EndpointState> remoteState = EnumSet
+ .of(EndpointState.CLOSED);
+
+ for (Link link = conn.linkHead(localState, remoteState); link != null; link = link
+ .next(localState, remoteState)) {
+
+ final ProtonClient ssn = _getClient(link.getSession());
+ ssn.removeLink(link);
+ }
+ }
+
+ private void _cleanDeliveries(Connector<?> connector) {
+ final Connection conn = connector.getConnection();
+ final EnumSet<EndpointState> localState = EnumSet
+ .of(EndpointState.ACTIVE);
+ final EnumSet<EndpointState> remoteState = EnumSet
+ .allOf(EndpointState.class);
+ for (Link link = conn.linkHead(localState, remoteState); link != null; link = link
+ .next(localState, remoteState)) {
+
+ if (link instanceof Receiver) {
+ // We settle all incoming deliveries upon receive
+ continue;
+ }
+
+ Delivery d;
+ final Calendar now = Calendar.getInstance();
+ for (Iterator<Delivery> iter = link.unsettled(); iter.hasNext();) {
+ d = iter.next();
+ Object ctx = d.getContext();
+ if (!(ctx instanceof Calendar)) {
+ // Has not been sent yet
+ continue;
+ }
+
+ final Calendar timeout = (Calendar) ctx;
+ boolean remoteClosed = link.getRemoteState().equals(
+ EndpointState.CLOSED);
+ boolean timedOut = now.after(timeout);
+ if (d.remotelySettled() || timedOut || remoteClosed) {
+ d.settle();
+ d.free();
+ }
+ }
+
+ }
+
+ }
+
+ private void _processDeliveries(Connector<?> connector) {
+ final Connection conn = connector.getConnection();
+ for (Delivery delivery = conn.getWorkHead(); delivery != null; delivery = delivery
+ .getWorkNext()) {
+
+ final ProtonClient client = _getClient(delivery.getLink()
+ .getSession());
+ client.processDelivery(delivery);
+ }
+ }
+
+ private void _queueOutgoingDeliveries(Connector<?> connector) {
+ final Connection conn = connector.getConnection();
+
+ final EnumSet<EndpointState> localState = EnumSet
+ .of(EndpointState.ACTIVE);
+ final EnumSet<EndpointState> remoteState = EnumSet
+ .allOf(EndpointState.class);
+
+ for (Session ssn = conn.sessionHead(localState, remoteState); ssn != null; ssn = ssn
+ .next(localState, remoteState)) {
+
+ final ProtonClient client = _getClient(ssn);
+ client.queueDeliveries();
+ }
+ }
+
+ private void _openLinks(Connector<?> connector) {
+ final Connection conn = connector.getConnection();
+ final EnumSet<EndpointState> localState = EnumSet
+ .of(EndpointState.UNINITIALIZED);
+ final EnumSet<EndpointState> remoteState = EnumSet
+ .allOf(EndpointState.class);
+ for (Link link = conn.linkHead(localState, remoteState); link != null; link = link
+ .next(localState, remoteState)) {
+
+ // configure the link
+ link.setSource(link.getRemoteSource());
+ link.setTarget(link.getRemoteTarget());
+
+ final ProtonClient client = _getClient(link.getSession());
+ client.addLink(link);
+ }
+ }
+
+ private ProtonClient _getClient(Session ssn) {
+ return (ProtonClient) ssn.getContext();
+ }
+
+ private void _openPendingSessions(Connector<?> connector) {
+ final Connection conn = connector.getConnection();
+ final EnumSet<EndpointState> localState = EnumSet
+ .of(EndpointState.UNINITIALIZED);
+ final EnumSet<EndpointState> remoteState = EnumSet
+ .allOf(EndpointState.class);
+
+ for (Session ssn = conn.sessionHead(localState, remoteState); ssn != null; ssn = ssn
+ .next(localState, remoteState)) {
+
+ final ProtonClient client = new ProtonClient(this, ssn);
+ ssn.setContext(client);
+ final Object ctx = connector.getContext();
+ assert (ctx instanceof ProtonAuthenticator);
+ ProtonAuthenticator auth = (ProtonAuthenticator) ctx;
+ if (auth.getAuthType() == AuthenticatorType.SERVER) {
+ ssn.open();
+ final ProtonListener l = (ProtonListener) ctx;
+ l.accept(client);
+ } else {
+ ssn.close();
+ }
+ }
+ }
+
+ private void _initConnection(Connector<?> connector) {
+ final Connection conn = connector.getConnection();
+ if (conn.getLocalState().equals(EndpointState.UNINITIALIZED)) {
+ conn.open();
+ }
+ }
+
+ private void _acceptConnectionRequests() {
+ for (final Listener<?> l : _driver.listeners()) {
+
+ @SuppressWarnings("unchecked")
+ final Connector<ProtonAuthenticator> connector = (Connector<ProtonAuthenticator>) l
+ .accept();
+ if (connector == null) {
+ continue;
+ }
+ connector.setContext(new ProtonAuthenticator(connector,
+ AuthenticatorType.SERVER));
+ }
+ }
+
+ public void stop() {
+ synchronized (_syncRoot) {
+ _isRunning = false;
+ }
+
+ wakeup();
+ }
+}
\ No newline at end of file
diff --git a/client/java/vdsm-json-rpc/src/test/java/org/ovirt/vdsm/jsonrpc/AmqpReactorTestHelper.java b/client/java/vdsm-json-rpc/src/test/java/org/ovirt/vdsm/jsonrpc/AmqpReactorTestHelper.java
new file mode 100644
index 0000000..46d9cc3
--- /dev/null
+++ b/client/java/vdsm-json-rpc/src/test/java/org/ovirt/vdsm/jsonrpc/AmqpReactorTestHelper.java
@@ -0,0 +1,18 @@
+package org.ovirt.vdsm.jsonrpc;
+
+import java.io.IOException;
+import org.ovirt.vdsm.reactors.ProtonReactor;
+import org.ovirt.vdsm.reactors.Reactor;
+
+public class AmqpReactorTestHelper implements ReactorTestHelper {
+ @Override
+ public Reactor createReactor() throws IOException {
+ return new ProtonReactor();
+ }
+
+ @Override
+ public String getUriScheme() {
+ return "amqp";
+ }
+
+}
diff --git a/client/java/vdsm-json-rpc/src/test/java/org/ovirt/vdsm/jsonrpc/TestJsonRpcClientAMQP.java b/client/java/vdsm-json-rpc/src/test/java/org/ovirt/vdsm/jsonrpc/TestJsonRpcClientAMQP.java
new file mode 100644
index 0000000..9e0c24c
--- /dev/null
+++ b/client/java/vdsm-json-rpc/src/test/java/org/ovirt/vdsm/jsonrpc/TestJsonRpcClientAMQP.java
@@ -0,0 +1,10 @@
+package org.ovirt.vdsm.jsonrpc;
+
+public class TestJsonRpcClientAMQP extends TestJsonRpcClient {
+
+ @Override
+ protected ReactorTestHelper getHelper() {
+ return new AmqpReactorTestHelper();
+ }
+
+}
--
To view, visit http://gerrit.ovirt.org/15428
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I94c52e118cb63d7df84b89a9b93da7b9e477be91
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Saggi Mizrahi <smizrahi(a)redhat.com>
Change in vdsm[master]: vdsm: expose release call to engine and via vdsClient
by Martin Polednik
Martin Polednik has uploaded a new change for review.
Change subject: vdsm: expose release call to engine and via vdsClient
......................................................................
vdsm: expose release call to engine and via vdsClient
Release would, in an ideal world, always be called when a VM dies.
Exposing this verb gives users a tool to force a release if a series
of events leads to a situation that should never occur(tm).
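For illustration only (not part of the patch), a client-side call to the new
verb could look roughly like the following; the device name is a made-up
example, and the status layout follows the {'status': doneCode} convention
used in API.py:

    import vdsm.vdscli

    server = vdsm.vdscli.connect()
    result = server.hostdevRelease('pci_0000_00_19_0')  # hypothetical name
    assert result['status']['code'] == 0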
Change-Id: I920ae8b5c82134f09a12e56a529fa3d30fd7ab53
Signed-off-by: Martin Polednik <mpolednik(a)redhat.com>
---
M client/vdsClient.py
M vdsm/API.py
M vdsm/rpc/BindingXMLRPC.py
3 files changed, 17 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/94/32394/1
diff --git a/client/vdsClient.py b/client/vdsClient.py
index 783afa6..85d00cd 100644
--- a/client/vdsClient.py
+++ b/client/vdsClient.py
@@ -521,6 +521,10 @@
def do_getAllVmStats(self, args):
return self.ExecAndExit(self.s.getAllVmStats())
+    def do_hostdevRelease(self, args):
+ device_name = args[0]
+ return self.ExecAndExit(self.s.hostdevRelease(device_name))
+
def desktopLogin(self, args):
vmId, domain, user, password = tuple(args[:4])
if len(args) > 4:
@@ -2149,6 +2153,10 @@
('',
'Get Statistics info for all existing VMs'
)),
+ 'hostdevRelease': (serv.do_hostdevRelease,
+ ('<deviceName>',
+ 'Release specified device from any VMs on the host'
+ )),
'getVGList': (serv.getVGList,
('storageType',
'List of all VGs.'
diff --git a/vdsm/API.py b/vdsm/API.py
index 8da6030..6269d72 100644
--- a/vdsm/API.py
+++ b/vdsm/API.py
@@ -1296,6 +1296,10 @@
devices = hostdev.list_by_caps(self._cif.vmContainer, caps)
return {'status': doneCode, 'deviceList': devices}
+ def hostdevRelease(self, device_name):
+ hostdev.release(device_name)
+ return {'status': doneCode}
+
def getStats(self):
"""
Report host statistics.
diff --git a/vdsm/rpc/BindingXMLRPC.py b/vdsm/rpc/BindingXMLRPC.py
index 4c1ef20..bdd0886 100644
--- a/vdsm/rpc/BindingXMLRPC.py
+++ b/vdsm/rpc/BindingXMLRPC.py
@@ -487,6 +487,10 @@
api = API.Global()
return api.hostdevListByCaps(caps)
+ def hostdevRelease(self, device_name):
+ api = API.Global()
+ return api.hostdevRelease(device_name)
+
def vmGetIoTunePolicy(self, vmId):
vm = API.VM(vmId)
return vm.getIoTunePolicy()
@@ -1004,6 +1008,7 @@
(self.vmGetStats, 'getVmStats'),
(self.getAllVmStats, 'getAllVmStats'),
(self.hostdevListByCaps, 'hostdevListByCaps'),
+ (self.hostdevRelease, 'hostdevRelease'),
(self.vmMigrationCreate, 'migrationCreate'),
(self.vmDesktopLogin, 'desktopLogin'),
(self.vmDesktopLogoff, 'desktopLogoff'),
--
To view, visit http://gerrit.ovirt.org/32394
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I920ae8b5c82134f09a12e56a529fa3d30fd7ab53
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Martin Polednik <mpolednik(a)redhat.com>
Change in vdsm[master]: vdsm hostdev: add support for USB devices
by mpoledni@redhat.com
Martin Polednik has uploaded a new change for review.
Change subject: vdsm hostdev: add support for USB devices
......................................................................
vdsm hostdev: add support for USB devices
Libvirt allows passthrough of individual USB devices (not whole buses); this
patch exposes that functionality in vdsm.
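For context, an illustrative sketch (values are made up, not taken from a real
device) of the address dictionary that getUsbAddr() below builds, which
getXML() then maps onto the hostdev source element:

    # hypothetical output of getUsbAddr() for a USB device
    addr = {
        'bus': '1',
        'device': '3',
        'vendor_id': '0x8086',
        'product_id': '0x0189',
    }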
Change-Id: Iac74e7537d56bcb940ef07a4654d45cbcdbb1fb0
Signed-off-by: Martin Polednik <mpoledni(a)redhat.com>
---
M vdsm/caps.py
M vdsm/rpc/vdsmapi-schema.json
M vdsm/virt/vm.py
3 files changed, 71 insertions(+), 10 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/54/29054/1
diff --git a/vdsm/caps.py b/vdsm/caps.py
index d937d4e..8c16af6 100644
--- a/vdsm/caps.py
+++ b/vdsm/caps.py
@@ -530,7 +530,12 @@
# back that we could use to uniquely identify and initiate a device
continue
- if capability in ('pci',):
+ if capability in ('pci', 'usb_device'):
+            # Libvirt only allows attaching a USB device with capability
+            # 'usb', but the bus identifies itself as 'usb' while the
+            # device identifies itself as 'usb_device'
+ if dev['capability'] == 'usb_device':
+ dev['capability'] = 'usb'
devices.append(dev)
return devices
diff --git a/vdsm/rpc/vdsmapi-schema.json b/vdsm/rpc/vdsmapi-schema.json
index 921f3d4..ac10144 100644
--- a/vdsm/rpc/vdsmapi-schema.json
+++ b/vdsm/rpc/vdsmapi-schema.json
@@ -3156,33 +3156,55 @@
'specParams': 'VmRngDeviceSpecParams'}}
##
+# @StartupPolicy:
+#
+# Possible policies for startup with device
+#
+# @mandatory: fail if missing for any reason (the default)
+#
+# @requisite: fail if missing on boot up, drop if missing
+# on migrate/restore/revert
+#
+# @optional: drop if missing at any start attempt
+#
+# Since: 4.16.0
+##
+{'enum': 'StartupPolicy', 'data': ['mandatory', 'requisite', 'optional']}
+
+##
# @HostDeviceCapability:
#
# Properties of a host device.
#
# @pci: PCI device
#
+# @usb: USB device
+#
# Since: 4.16.0
##
-{'enum': 'HostDeviceCapability', 'data': ['pci']}
+{'enum': 'HostDeviceCapability', 'data': ['pci', 'usb']}
##
# @HostDeviceSpecParams:
#
# Properties of a host device.
#
-# @bootorder: #optional If specified, this device is part of the boot
-# sequence at the specified position
+# @bootorder: #optional If specified, this device is part of the boot
+# sequence at the specified position (for @pci and @usb)
#
-# @bar: #optional ROM visibility in the guest's memory map (for @pci)
+# @bar: #optional ROM visibility in the guest's
+# memory map (for @pci)
#
-# @file: #optional Binary file to be used as device's ROM (for @pci)
+# @file: #optional Binary file to be used as device's ROM (for @pci)
+#
+# @startupPolicy: #optional Possible boot handling with attached device
+# (for @usb)
#
# Since: 4.16.0
##
{'type': 'HostDeviceSpecParams',
- 'data': {'*bootorder': 'int', '*bar': 'bool',
- '*file': 'str'}}
+ 'data': {'*bootorder': 'int', '*bar': 'bool', '*file': 'str',
+ '*startupPolicy': 'StartupPolicy'}}
##
# @HostDevice:
diff --git a/vdsm/virt/vm.py b/vdsm/virt/vm.py
index 6143cfa..6a6978e 100644
--- a/vdsm/virt/vm.py
+++ b/vdsm/virt/vm.py
@@ -1631,13 +1631,16 @@
self.log.debug('Detaching hostdev %s', self.name)
self._node.dettach()
- def getPciAddr(self):
+ def _parsecaps(self):
capsxml = _domParseStr(self._node.XMLDesc(0)).childNodes[0]
self.log.debug('Got nodeDevice XML:\n%s',
capsxml.toprettyxml(encoding='utf-8'))
- capsxml = capsxml.getElementsByTagName('capability')[0]
+ return capsxml.getElementsByTagName('capability')[0]
+
+ def getPciAddr(self):
+ capsxml = self._parsecaps()
domain = capsxml.getElementsByTagName('domain')[0]. \
firstChild.nodeValue
bus = capsxml.getElementsByTagName('bus')[0].firstChild.nodeValue
@@ -1646,6 +1649,26 @@
self.log.debug('PCI device %s at address {domain: %s bus: %s '
'slot: %s}', self.name, domain, bus, slot)
return {'domain': domain, 'bus': bus, 'slot': slot}
+
+ def getUsbAddr(self):
+ capsxml = self._parsecaps()
+ addr = {}
+
+ addr['bus'] = capsxml.getElementsByTagName('bus')[0].firstChild. \
+ nodeValue
+ addr['device'] = capsxml.getElementsByTagName('device')[0]. \
+ firstChild.nodeValue
+        # TODO: handle nonexistent product_id and vendor_id by not adding them
+ # to addr
+ addr['product_id'] = capsxml.getElementsByTagName('product')[0].\
+ getAttribute('id')
+ addr['vendor_id'] = capsxml.getElementsByTagName('vendor')[0].\
+ getAttribute('id')
+
+ self.log.debug('USB device %s {product: %s, vendor: %s} at address '
+ '{bus: %s device: %s}', self.name, addr['product_id'],
+ addr['vendor_id'], addr['bus'], addr['device'])
+ return addr
def getXML(self):
"""
@@ -1683,6 +1706,17 @@
rom.setAttrs(**romAttrs)
+ elif self.capability == 'usb_device':
+            addr = self.getUsbAddr()
+            try:
+                source.appendChildWithArgs('vendor', None,
+                                           id=addr['vendor_id'])
+                source.appendChildWithArgs('product', None,
+                                           id=addr['product_id'])
+            except KeyError:
+                source.appendChildWithArgs('address', None, **addr)
+
+ if 'startupPolicy' in self.specParams:
+ source.setAttrs(startupPolicy=self.specParams['startupPolicy'])
+
return hostdev
--
To view, visit http://gerrit.ovirt.org/29054
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Iac74e7537d56bcb940ef07a4654d45cbcdbb1fb0
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Martin Polednik <mpoledni(a)redhat.com>