[PATCH] python: Add inquire()
by Nir Soffer
Use sanlock_inquire() to query the resource held by the current process
(using the slkfd= argument) or held by another program (using the pid=
argument).
When using the slkfd= argument, we communicate with sanlock daemon using
slkfd, ensuring that the current process is connected to sanlock. If the
current process is not connected, sanlock assumes that the process is
dead, and releases all the leases acquired by the process.
When using the pid= argument, the function opens a new socket to sanlock
daemon and queries the status of resources owned by the specified pid.
In both cases the information comes from sanlock daemon, without
accessing storage. To verify storage content, the caller should use
read_resource() and read_resource_owners().
The call returns a list of resource dicts that can be used for verifying
that sanlock state matches the program state.
sanlock_inquire() reports the SANLK_RES_LVER or SANLK_RES_SHARED
flags in the resource flags field. Add the field to the returned dict
and add sanlock constants for the flag.
The resource flags are needed if you want to restore a lease after it
was released, ensuring that nobody else acquired the lease after it was
released. This flow is used by libvirt using libsanlock. With this
change we can implement the same flow using the python binding.
Signed-off-by: Nir Soffer <nsoffer(a)redhat.com>
---
python/sanlock.c | 173 +++++++++++++++++++++++++++++++++++++++++++
tests/python_test.py | 84 +++++++++++++++++++++
2 files changed, 257 insertions(+)
diff --git a/python/sanlock.c b/python/sanlock.c
index 67d34fc..238d172 100644
--- a/python/sanlock.c
+++ b/python/sanlock.c
@@ -323,6 +323,88 @@ exit_fail:
return NULL;
}
+/* Convert disks array to list of tuples. */
+static PyObject *
+disks_to_list(struct sanlk_disk *disks, uint32_t disks_count)
+{
+ PyObject *result = NULL;
+ PyObject *disk = NULL;
+
+ result = PyList_New(disks_count);
+ if (result == NULL)
+ return NULL;
+
+ for (uint32_t i = 0; i < disks_count; i++) {
+ disk = Py_BuildValue(
+ "(s,K)",
+ disks[i].path,
+ disks[i].offset);
+ if (disk == NULL)
+ goto exit_fail;
+
+ /* Steals reference to disk. */
+ if (PyList_SetItem(result, i, disk) != 0)
+ goto exit_fail;
+
+ disk = NULL;
+ }
+
+ return result;
+
+exit_fail:
+ Py_XDECREF(result);
+ Py_XDECREF(disk);
+
+ return NULL;
+}
+
+/* Convert resources array returned from sanlock_inquire() to list of resource
+ * dicts. */
+static PyObject *
+resources_to_list(struct sanlk_resource **res, int res_count)
+{
+ PyObject *result = NULL;
+ PyObject *info = NULL;
+ PyObject *disks = NULL;
+
+ if ((result = PyList_New(res_count)) == NULL)
+ return NULL;
+
+ for (int i = 0; i < res_count; i++) {
+ disks = disks_to_list(res[i]->disks, res[i]->num_disks);
+ if (disks == NULL)
+ goto exit_fail;
+
+ /* Steals reference to disks. */
+ info = Py_BuildValue(
+ "{s:y,s:y,s:k,s:K,s:N}",
+ "lockspace", res[i]->lockspace_name,
+ "resource", res[i]->name,
+ "flags", res[i]->flags,
+ "version", res[i]->lver,
+ "disks", disks);
+ if (info == NULL)
+ goto exit_fail;
+
+ disks = NULL;
+
+ /* Steals reference to info. */
+ if (PyList_SetItem(result, i, info) != 0)
+ goto exit_fail;
+
+ info = NULL;
+ }
+
+ return result;
+
+exit_fail:
+ Py_XDECREF(result);
+ Py_XDECREF(info);
+ Py_XDECREF(disks);
+
+ return NULL;
+}
+
/* register */
PyDoc_STRVAR(pydoc_register, "\
register() -> int\n\
@@ -1062,6 +1144,89 @@ finally:
Py_RETURN_NONE;
}
+/* inquire */
+PyDoc_STRVAR(pydoc_inquire, "\
+inquire(slkfd=fd, pid=owner)\n\
+Return list of resources held by current process (using the slkfd \n\
+argument to specify the sanlock file descriptor) or for another \n\
+process (using the pid argument).\n\
+\n\
+Does not access storage. To learn about resource state on storage,\n\
+use sanlock.read_resource() and sanlock.read_resource_owners().\n\
+\n\
+Arguments\n\
+ slkfd (int): The file descriptor returned from sanlock.register().\n\
+ pid (int): The program pid to query.\n\
+\n\
+Returns\n\
+ List of resource dicts with the following keys:\n\
+ lockspace (bytes): lockspace name\n\
+ resource (bytes): resource name\n\
+ flags (int): resource flags (sanlock.RES_*)\n\
+ version (int): resource version\n\
+ disks (list): list of disk tuples (path, offset)\n\
+");
+
+static PyObject *
+py_inquire(PyObject *self __unused, PyObject *args, PyObject *keywds)
+{
+ int sanlockfd = -1;
+ int pid = -1;
+ char *kwlist[] = {"slkfd", "pid", NULL};
+ int rv = -1;
+
+ /* sanlock_inquire() return values. */
+ int res_count = 0;
+ char *res_state = NULL;
+
+ /* Array of resources parsed from res_state. */
+ struct sanlk_resource **res_arr = NULL;
+
+ /* List of resource dicts. */
+ PyObject *result = NULL;
+
+ if (!PyArg_ParseTupleAndKeywords(
+ args, keywds, "|ii", kwlist, &sanlockfd, &pid)) {
+ return NULL;
+ }
+
+ /* Check if any of the slkfd or pid parameters was given. */
+ if (sanlockfd == -1 && pid == -1) {
+ set_sanlock_error(-EINVAL, "Invalid slkfd and pid values");
+ return NULL;
+ }
+
+ /* Inquire sanlock (gil disabled) */
+ Py_BEGIN_ALLOW_THREADS
+ rv = sanlock_inquire(sanlockfd, pid, 0, &res_count, &res_state);
+ Py_END_ALLOW_THREADS
+
+ if (rv != 0) {
+ set_sanlock_error(rv, "Inquire error");
+ return NULL;
+ }
+
+ if (res_count > 0) {
+ rv = sanlock_state_to_args(res_state, &res_count, &res_arr);
+ if (rv != 0) {
+ /* TODO: Include res_state in the error. */
+ set_sanlock_error(rv, "Error parsing inquire state string");
+ goto finally;
+ }
+ }
+
+ result = resources_to_list(res_arr, res_count);
+
+finally:
+ free(res_state);
+
+ for (int i = 0; i < res_count; i++)
+ free(res_arr[i]);
+ free(res_arr);
+
+ return result;
+}
+
/* release */
PyDoc_STRVAR(pydoc_release, "\
release(lockspace, resource, disks [, slkfd=fd, pid=owner])\n\
@@ -1752,6 +1917,8 @@ sanlock_methods[] = {
METH_VARARGS|METH_KEYWORDS, pydoc_read_resource_owners},
{"acquire", (PyCFunction) py_acquire,
METH_VARARGS|METH_KEYWORDS, pydoc_acquire},
+ {"inquire", (PyCFunction) py_inquire,
+ METH_VARARGS|METH_KEYWORDS, pydoc_inquire},
{"release", (PyCFunction) py_release,
METH_VARARGS|METH_KEYWORDS, pydoc_release},
{"request", (PyCFunction) py_request,
@@ -1850,6 +2017,12 @@ module_init(PyObject* m)
if (PyModule_AddIntConstant(m, "SETEV_ALL_HOSTS", SANLK_SETEV_ALL_HOSTS))
return -1;
+ /* sanlock_inquire() result resource flags */
+ if (PyModule_AddIntConstant(m, "RES_LVER", SANLK_RES_LVER))
+ return -1;
+ if (PyModule_AddIntConstant(m, "RES_SHARED", SANLK_RES_SHARED))
+ return -1;
+
/* Tuples with supported sector size and alignment values */
PyObject *sector = Py_BuildValue("ii", SECTOR_SIZE_512, SECTOR_SIZE_4K);
diff --git a/tests/python_test.py b/tests/python_test.py
index 58a22c7..caf2f3e 100644
--- a/tests/python_test.py
+++ b/tests/python_test.py
@@ -479,6 +479,90 @@ def test_acquire_release_resource(tmpdir, sanlock_daemon, size, offset):
assert owners == []
+@pytest.mark.parametrize("res_name", [
+ "ascii",
+ "\u05d0", # Hebrew Alef
+])
+def test_inquire(tmpdir, sanlock_daemon, res_name):
+ ls_path = str(tmpdir.join("ls_name"))
+ util.create_file(ls_path, MiB)
+
+ res_path = str(tmpdir.join(res_name))
+ util.create_file(res_path, 10 * MiB)
+
+ fd = sanlock.register()
+
+ # No lockspace yet.
+ assert sanlock.inquire(slkfd=fd) == []
+
+ sanlock.write_lockspace(b"ls_name", ls_path, offset=0, iotimeout=1)
+ sanlock.add_lockspace(b"ls_name", 1, ls_path, offset=0, iotimeout=1)
+
+ # No resources created yet.
+ assert sanlock.inquire(slkfd=fd) == []
+
+ resources = [
+ # name, offset, acquire
+ (b"res-0", 0 * MiB, True),
+ (b"res-1", 1 * MiB, False),
+ (b"res-2", 2 * MiB, True),
+ (b"res-8", 8 * MiB, False),
+ (b"res-9", 9 * MiB, True),
+ ]
+
+ for res_name, res_offset, acquire in resources:
+ sanlock.write_resource(b"ls_name", res_name, [(res_path, res_offset)])
+
+ # No resource acquired yet.
+ assert sanlock.inquire(slkfd=fd) == []
+
+ # Acquire resources.
+ for res_name, res_offset, acquire in resources:
+ if acquire:
+ sanlock.acquire(
+ b"ls_name", res_name, [(res_path, res_offset)], slkfd=fd)
+
+ time.sleep(1)
+
+ expected = [
+ {
+ "lockspace": b"ls_name",
+ "resource": b"res-0",
+ "flags": sanlock.RES_LVER,
+ "version": 1,
+ "disks": [(res_path, 0 * MiB)],
+ },
+ {
+ "lockspace": b"ls_name",
+ "resource": b"res-2",
+ "flags": sanlock.RES_LVER,
+ "version": 1,
+ "disks": [(res_path, 2 * MiB)],
+ },
+ {
+ "lockspace": b"ls_name",
+ "resource": b"res-9",
+ "flags": sanlock.RES_LVER,
+ "version": 1,
+ "disks": [(res_path, 9 * MiB)],
+ },
+ ]
+
+ # Check acquired resources using slkfd.
+ assert sanlock.inquire(slkfd=fd) == expected
+
+ # Check acquired resources using pid.
+ assert sanlock.inquire(pid=os.getpid()) == expected
+
+ for res_name, res_offset, acquire in resources:
+ if acquire:
+ sanlock.release(
+ b"ls_name", res_name, [(res_path, res_offset)], slkfd=fd)
+
+ # All resource released.
+ assert sanlock.inquire(slkfd=fd) == []
+
+
@pytest.mark.parametrize("align, sector", [
# Invalid alignment
(KiB, sanlock.SECTOR_SIZE[0]),
--
2.26.3
2 years
[sanlock] branch master updated: python: Add inquire()
by pagure@pagure.io
This is an automated email from the git hooks/post-receive script.
nsoffer pushed a commit to branch master
in repository sanlock.
The following commit(s) were added to refs/heads/master by this push:
new 2d3e2fc python: Add inquire()
2d3e2fc is described below
commit 2d3e2fceb615a5bd12d26b09fe95668152fb0743
Author: Nir Soffer <nsoffer(a)redhat.com>
AuthorDate: Wed Apr 28 02:20:28 2021 +0300
python: Add inquire()
Use sanlock_inquire() to query the resource held by the current process
(using the slkfd= argument) or held by another program (using the pid=
argument).
When using the slkfd= argument, we communicate with sanlock daemon using
slkfd, ensuring that the current process is connected to sanlock. If the
current process is not connected, sanlock assumes that the process is
dead, and releases all the leases acquired by the process.
When using the pid= argument, the function opens a new socket to sanlock
daemon and queries the status of resources owned by the specified pid.
In both cases the information comes from sanlock daemon, without
accessing storage. To verify storage content, the caller should use
read_resource() and read_resource_owners().
The call returns a list of resource dicts that can be used for verifying
that sanlock state matches the program state.
sanlock_inquire() reports the SANLK_RES_LVER or SANLK_RES_SHARED
flags in the resource flags field. Add the field to the returned dict
and add sanlock constants for the flag.
The resource flags are needed if you want to restore a lease after it
was released, ensuring that nobody else acquired the lease after it was
released. This flow is used by libvirt using libsanlock. With this
change we can implement the same flow using the python binding.
Signed-off-by: Nir Soffer <nsoffer(a)redhat.com>
---
python/sanlock.c | 173 +++++++++++++++++++++++++++++++++++++++++++++++++++
tests/python_test.py | 84 +++++++++++++++++++++++++
2 files changed, 257 insertions(+)
diff --git a/python/sanlock.c b/python/sanlock.c
index 67d34fc..c481464 100644
--- a/python/sanlock.c
+++ b/python/sanlock.c
@@ -323,6 +323,88 @@ exit_fail:
return NULL;
}
+/* Convert disks array to list of tuples. */
+static PyObject *
+disks_to_list(struct sanlk_disk *disks, uint32_t disks_count)
+{
+ PyObject *result = NULL;
+ PyObject *disk = NULL;
+
+ result = PyList_New(disks_count);
+ if (result == NULL)
+ return NULL;
+
+ for (uint32_t i = 0; i < disks_count; i++) {
+ disk = Py_BuildValue(
+ "(s,K)",
+ disks[i].path,
+ disks[i].offset);
+ if (disk == NULL)
+ goto exit_fail;
+
+ /* Steals reference to disk. */
+ if (PyList_SetItem(result, i, disk) != 0)
+ goto exit_fail;
+
+ disk = NULL;
+ }
+
+ return result;
+
+exit_fail:
+ Py_XDECREF(result);
+ Py_XDECREF(disk);
+
+ return NULL;
+}
+
+/* Convert resources array returned from sanlock_inquire() to list of resource
+ * dicts. */
+static PyObject *
+resources_to_list(struct sanlk_resource **res, int res_count)
+{
+ PyObject *result = NULL;
+ PyObject *info = NULL;
+ PyObject *disks = NULL;
+
+ if ((result = PyList_New(res_count)) == NULL)
+ return NULL;
+
+ for (int i = 0; i < res_count; i++) {
+ disks = disks_to_list(res[i]->disks, res[i]->num_disks);
+ if (disks == NULL)
+ goto exit_fail;
+
+ /* Steals reference to disks. */
+ info = Py_BuildValue(
+ "{s:y,s:y,s:k,s:K,s:N}",
+ "lockspace", res[i]->lockspace_name,
+ "resource", res[i]->name,
+ "flags", res[i]->flags,
+ "version", res[i]->lver,
+ "disks", disks);
+ if (info == NULL)
+ goto exit_fail;
+
+ disks = NULL;
+
+ /* Steals reference to info. */
+ if (PyList_SetItem(result, i, info) != 0)
+ goto exit_fail;
+
+ info = NULL;
+ }
+
+ return result;
+
+exit_fail:
+ Py_XDECREF(result);
+ Py_XDECREF(info);
+ Py_XDECREF(disks);
+
+ return NULL;
+}
+
/* register */
PyDoc_STRVAR(pydoc_register, "\
register() -> int\n\
@@ -1062,6 +1144,89 @@ finally:
Py_RETURN_NONE;
}
+/* inquire */
+PyDoc_STRVAR(pydoc_inquire, "\
+inquire(slkfd=-1, pid=-1)\n\
+Return list of resources held by current process (using the slkfd \n\
+argument to specify the sanlock file descriptor) or for another \n\
+process (using the pid argument).\n\
+\n\
+Does not access storage. To learn about resource state on storage,\n\
+use sanlock.read_resource() and sanlock.read_resource_owners().\n\
+\n\
+Arguments\n\
+ slkfd (int): The file descriptor returned from sanlock.register().\n\
+ pid (int): The program pid to query.\n\
+\n\
+Returns\n\
+ List of resource dicts with the following keys:\n\
+ lockspace (bytes): lockspace name\n\
+ resource (bytes): resource name\n\
+ flags (int): resource flags (sanlock.RES_*)\n\
+ version (int): resource version\n\
+ disks (list): list of disk tuples (path, offset)\n\
+");
+
+static PyObject *
+py_inquire(PyObject *self __unused, PyObject *args, PyObject *keywds)
+{
+ int sanlockfd = -1;
+ int pid = -1;
+ char *kwlist[] = {"slkfd", "pid", NULL};
+ int rv = -1;
+
+ /* sanlock_inquire() return values. */
+ int res_count = 0;
+ char *res_state = NULL;
+
+ /* Array of resources parsed from res_state. */
+ struct sanlk_resource **res_arr = NULL;
+
+ /* List of resource dicts. */
+ PyObject *result = NULL;
+
+ if (!PyArg_ParseTupleAndKeywords(
+ args, keywds, "|ii", kwlist, &sanlockfd, &pid)) {
+ return NULL;
+ }
+
+ /* Check if any of the slkfd or pid parameters was given. */
+ if (sanlockfd == -1 && pid == -1) {
+ set_sanlock_error(-EINVAL, "Invalid slkfd and pid values");
+ return NULL;
+ }
+
+ /* Inquire sanlock (gil disabled) */
+ Py_BEGIN_ALLOW_THREADS
+ rv = sanlock_inquire(sanlockfd, pid, 0, &res_count, &res_state);
+ Py_END_ALLOW_THREADS
+
+ if (rv != 0) {
+ set_sanlock_error(rv, "Inquire error");
+ return NULL;
+ }
+
+ if (res_count > 0) {
+ rv = sanlock_state_to_args(res_state, &res_count, &res_arr);
+ if (rv != 0) {
+ /* TODO: Include res_state in the error. */
+ set_sanlock_error(rv, "Error parsing inquire state string");
+ goto finally;
+ }
+ }
+
+ result = resources_to_list(res_arr, res_count);
+
+finally:
+ free(res_state);
+
+ for (int i = 0; i < res_count; i++)
+ free(res_arr[i]);
+ free(res_arr);
+
+ return result;
+}
+
/* release */
PyDoc_STRVAR(pydoc_release, "\
release(lockspace, resource, disks [, slkfd=fd, pid=owner])\n\
@@ -1752,6 +1917,8 @@ sanlock_methods[] = {
METH_VARARGS|METH_KEYWORDS, pydoc_read_resource_owners},
{"acquire", (PyCFunction) py_acquire,
METH_VARARGS|METH_KEYWORDS, pydoc_acquire},
+ {"inquire", (PyCFunction) py_inquire,
+ METH_VARARGS|METH_KEYWORDS, pydoc_inquire},
{"release", (PyCFunction) py_release,
METH_VARARGS|METH_KEYWORDS, pydoc_release},
{"request", (PyCFunction) py_request,
@@ -1850,6 +2017,12 @@ module_init(PyObject* m)
if (PyModule_AddIntConstant(m, "SETEV_ALL_HOSTS", SANLK_SETEV_ALL_HOSTS))
return -1;
+ /* sanlock_inquire() result resource flags */
+ if (PyModule_AddIntConstant(m, "RES_LVER", SANLK_RES_LVER))
+ return -1;
+ if (PyModule_AddIntConstant(m, "RES_SHARED", SANLK_RES_SHARED))
+ return -1;
+
/* Tuples with supported sector size and alignment values */
PyObject *sector = Py_BuildValue("ii", SECTOR_SIZE_512, SECTOR_SIZE_4K);
diff --git a/tests/python_test.py b/tests/python_test.py
index 58a22c7..caf2f3e 100644
--- a/tests/python_test.py
+++ b/tests/python_test.py
@@ -479,6 +479,90 @@ def test_acquire_release_resource(tmpdir, sanlock_daemon, size, offset):
assert owners == []
+@pytest.mark.parametrize("res_name", [
+ "ascii",
+ "\u05d0", # Hebrew Alef
+])
+def test_inquire(tmpdir, sanlock_daemon, res_name):
+ ls_path = str(tmpdir.join("ls_name"))
+ util.create_file(ls_path, MiB)
+
+ res_path = str(tmpdir.join(res_name))
+ util.create_file(res_path, 10 * MiB)
+
+ fd = sanlock.register()
+
+ # No lockspace yet.
+ assert sanlock.inquire(slkfd=fd) == []
+
+ sanlock.write_lockspace(b"ls_name", ls_path, offset=0, iotimeout=1)
+ sanlock.add_lockspace(b"ls_name", 1, ls_path, offset=0, iotimeout=1)
+
+ # No resources created yet.
+ assert sanlock.inquire(slkfd=fd) == []
+
+ resources = [
+ # name, offset, acquire
+ (b"res-0", 0 * MiB, True),
+ (b"res-1", 1 * MiB, False),
+ (b"res-2", 2 * MiB, True),
+ (b"res-8", 8 * MiB, False),
+ (b"res-9", 9 * MiB, True),
+ ]
+
+ for res_name, res_offset, acquire in resources:
+ sanlock.write_resource(b"ls_name", res_name, [(res_path, res_offset)])
+
+ # No resource acquired yet.
+ assert sanlock.inquire(slkfd=fd) == []
+
+ # Acquire resources.
+ for res_name, res_offset, acquire in resources:
+ if acquire:
+ sanlock.acquire(
+ b"ls_name", res_name, [(res_path, res_offset)], slkfd=fd)
+
+ time.sleep(1)
+
+ expected = [
+ {
+ "lockspace": b"ls_name",
+ "resource": b"res-0",
+ "flags": sanlock.RES_LVER,
+ "version": 1,
+ "disks": [(res_path, 0 * MiB)],
+ },
+ {
+ "lockspace": b"ls_name",
+ "resource": b"res-2",
+ "flags": sanlock.RES_LVER,
+ "version": 1,
+ "disks": [(res_path, 2 * MiB)],
+ },
+ {
+ "lockspace": b"ls_name",
+ "resource": b"res-9",
+ "flags": sanlock.RES_LVER,
+ "version": 1,
+ "disks": [(res_path, 9 * MiB)],
+ },
+ ]
+
+ # Check acquired resources using slkfd.
+ assert sanlock.inquire(slkfd=fd) == expected
+
+ # Check acquired resources using pid.
+ assert sanlock.inquire(pid=os.getpid()) == expected
+
+ for res_name, res_offset, acquire in resources:
+ if acquire:
+ sanlock.release(
+ b"ls_name", res_name, [(res_path, res_offset)], slkfd=fd)
+
+ # All resource released.
+ assert sanlock.inquire(slkfd=fd) == []
+
+
@pytest.mark.parametrize("align, sector", [
# Invalid alignment
(KiB, sanlock.SECTOR_SIZE[0]),
--
To stop receiving notification emails like this one, please contact
the administrator of this repository.
2 years
[sanlock] 01/01: config: Add max_worker_threads
by pagure@pagure.io
This is an automated email from the git hooks/post-receive script.
nsoffer pushed a commit to annotated tag config-v1
in repository sanlock.
commit 96f282c7b055e4856ee010d1b664179e33137c6c
Author: Nir Soffer <nsoffer(a)redhat.com>
AuthorDate: Mon Nov 30 20:40:38 2020 +0200
config: Add max_worker_threads
Concurrent async add_lockspace calls are limited by the number of worker
threads. Using larger number of worker threads shorten the time to add
many lockspaces in large setups.
Previously this value could be modified only via the command line. Now
it can be modified via the sanlock configuration file for easier
deployment.
Buglink: https://bugzilla.redhat.com/1902468
Signed-off-by: Nir Soffer <nsoffer(a)redhat.com>
---
src/main.c | 7 +++++++
src/sanlock.conf | 3 +++
2 files changed, 10 insertions(+)
diff --git a/src/main.c b/src/main.c
index e2901f3..63d8cff 100644
--- a/src/main.c
+++ b/src/main.c
@@ -2877,6 +2877,13 @@ static void read_config_file(void)
} else {
log_error("ignore unknown max_sectors_kb %s", str);
}
+
+ } else if (!strcmp(str, "max_worker_threads")) {
+ get_val_int(line, &val);
+ if (val < DEFAULT_MIN_WORKER_THREADS)
+ val = DEFAULT_MIN_WORKER_THREADS;
+ com.max_worker_threads = val;
+
}
}
diff --git a/src/sanlock.conf b/src/sanlock.conf
index 9b78e5c..2909a9c 100644
--- a/src/sanlock.conf
+++ b/src/sanlock.conf
@@ -66,3 +66,6 @@
#
# write_init_io_timeout = <seconds>
# command line: n/a
+#
+# max_worker_threads = 8
+# command line: -t 8
--
To stop receiving notification emails like this one, please contact
the administrator of this repository.
2 years
[sanlock] 01/01: sanlock: Report actual max_worker_threads
by pagure@pagure.io
This is an automated email from the git hooks/post-receive script.
nsoffer pushed a commit to annotated tag status-v1
in repository sanlock.
commit 0bb9ddcddb4eb8db79372deff0c0a964c9833435
Author: Nir Soffer <nsoffer(a)redhat.com>
AuthorDate: Sun Dec 6 20:14:11 2020 +0200
sanlock: Report actual max_worker_threads
Report actual max_worker_threads in "sanlock client status -D". This
allows detecting the issue when sanlock configuration was updated, but
sanlock daemon needs a restart to pick up the new configuration.
Signed-off-by: Nir Soffer <nsoffer(a)redhat.com>
---
src/cmd.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/src/cmd.c b/src/cmd.c
index a5fa30f..3137df0 100644
--- a/src/cmd.c
+++ b/src/cmd.c
@@ -2265,6 +2265,7 @@ static int print_state_daemon(char *str)
"max_sectors_kb_ignore=%d "
"max_sectors_kb_align=%d "
"max_sectors_kb_num=%d "
+ "max_worker_threads=%d "
"write_init_io_timeout=%u "
"use_aio=%d "
"kill_grace_seconds=%d "
@@ -2292,6 +2293,7 @@ static int print_state_daemon(char *str)
com.max_sectors_kb_ignore,
com.max_sectors_kb_align,
com.max_sectors_kb_num,
+ com.max_worker_threads,
com.write_init_io_timeout,
main_task.use_aio,
kill_grace_seconds,
--
To stop receiving notification emails like this one, please contact
the administrator of this repository.
2 years
[sanlock] 01/01: sanlock: Setup priority before dropping privileges
by pagure@pagure.io
This is an automated email from the git hooks/post-receive script.
nsoffer pushed a commit to annotated tag startup-v1
in repository sanlock.
commit 7cdfc7d6a06f892b449eb5b85d647b95fc084154
Author: Nir Soffer <nsoffer(a)redhat.com>
AuthorDate: Sun May 3 01:34:21 2020 +0300
sanlock: Setup priority before dropping privileges
sched_setscheduler() requires root, but we called it after dropping
privileges, so it always failed:
2020-02-13 12:34:19 1480 [8866]: sanlock daemon started 3.8.0 host a08359de-225c-4c21-a7d6-3623bb3bd6fb.host4
2020-02-13 12:34:19 1480 [8866]: set scheduler RR|RESET_ON_FORK priority 99 failed: Operation not permitted
Move setup_priority up before we drop privileges.
With this change sanlock runs now with RR scheduler and expected
priority:
$ ps -o cmd,cls,rtprio -p 2275
CMD CLS RTPRIO
/usr/sbin/sanlock daemon RR 99
Not running with real time scheduler may be the reason we see random
failures to write lockspace in oVirt system tests:
https://bugzilla.redhat.com/1247135
Signed-off-by: Nir Soffer <nsoffer(a)redhat.com>
---
src/main.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/main.c b/src/main.c
index 8c6eef8..ebc0b11 100644
--- a/src/main.c
+++ b/src/main.c
@@ -1750,14 +1750,14 @@ static int do_daemon(void)
setup_host_name();
+ setup_priority();
+
setup_uid_gid();
uname(&nodename);
log_warn("sanlock daemon started %s host %s (%s)", VERSION, our_host_name_global, nodename.nodename);
- setup_priority();
-
rv = thread_pool_create(DEFAULT_MIN_WORKER_THREADS, com.max_worker_threads);
if (rv < 0)
goto out;
--
To stop receiving notification emails like this one, please contact
the administrator of this repository.
2 years
[sanlock] 01/01: sanlock: Shrink thread pool when there is no work
by pagure@pagure.io
This is an automated email from the git hooks/post-receive script.
nsoffer pushed a commit to annotated tag shrink-pool-v1
in repository sanlock.
commit 19a937183a92b61e0519f4e6d3d83bc24e718991
Author: Nir Soffer <nsoffer(a)redhat.com>
AuthorDate: Tue Dec 1 14:52:55 2020 +0200
sanlock: Shrink thread pool when there is no work
When the thread pool has no work to do, and we have enough free workers,
other worker threads will terminate.
Without this change, when using large number of worker threads, the
thread pool grows to max_worker_threads workers, and never shrink down.
With this change, the pool quickly shrinks down, but we always have
enough free worker threads for serving new requests.
Signed-off-by: Nir Soffer <nsoffer(a)redhat.com>
---
src/main.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/src/main.c b/src/main.c
index 63d8cff..622dc8e 100644
--- a/src/main.c
+++ b/src/main.c
@@ -932,6 +932,9 @@ static void *thread_pool_worker(void *data)
while (1) {
while (!pool.quit && list_empty(&pool.work_data)) {
+ if (pool.free_workers >= DEFAULT_MIN_WORKER_THREADS)
+ goto out;
+
pool.free_workers++;
pthread_cond_wait(&pool.cond, &pool.mutex);
pool.free_workers--;
@@ -952,6 +955,7 @@ static void *thread_pool_worker(void *data)
break;
}
+out:
pool.num_workers--;
if (!pool.num_workers)
pthread_cond_signal(&pool.quit_wait);
--
To stop receiving notification emails like this one, please contact
the administrator of this repository.
2 years
[sanlock] 01/01: python: Add inquire()
by pagure@pagure.io
This is an automated email from the git hooks/post-receive script.
nsoffer pushed a commit to annotated tag python-inquire-v1
in repository sanlock.
commit 683b9d6d46649da9f5650c8e467d07e350d1d6ed
Author: Nir Soffer <nsoffer(a)redhat.com>
AuthorDate: Wed Apr 28 02:20:28 2021 +0300
python: Add inquire()
Use sanlock_inquire() to query the resource held by the current process
(using the slkfd= argument) or held by another program (using the pid=
argument).
When using the slkfd= argument, we communicate with sanlock daemon using
slkfd, ensuring that the current process is connected to sanlock. If the
current process is not connected, sanlock assumes that the process is
dead, and releases all the leases acquired by the process.
When using the pid= argument, the function opens a new socket to sanlock
daemon and queries the status of resources owned by the specified pid.
In both cases the information comes from sanlock daemon, without
accessing storage. To verify storage content, the caller should use
read_resource() and read_resource_owners().
The call returns a list of resource dicts that can be used for verifying
that sanlock state matches the program state.
sanlock_inquire() reports the SANLK_RES_LVER or SANLK_RES_SHARED
flags in the resource flags field. Add the field to the returned dict
and add sanlock constants for the flag.
The resource flags are needed if you want to restore a lease after it
was released, ensuring that nobody else acquired the lease after it was
released. This flow is used by libvirt using libsanlock. With this
change we can implement the same flow using the python binding.
Signed-off-by: Nir Soffer <nsoffer(a)redhat.com>
---
python/sanlock.c | 173 +++++++++++++++++++++++++++++++++++++++++++++++++++
tests/python_test.py | 84 +++++++++++++++++++++++++
2 files changed, 257 insertions(+)
diff --git a/python/sanlock.c b/python/sanlock.c
index 67d34fc..238d172 100644
--- a/python/sanlock.c
+++ b/python/sanlock.c
@@ -323,6 +323,88 @@ exit_fail:
return NULL;
}
+/* Convert disks array to list of tuples. */
+static PyObject *
+disks_to_list(struct sanlk_disk *disks, uint32_t disks_count)
+{
+ PyObject *result = NULL;
+ PyObject *disk = NULL;
+
+ result = PyList_New(disks_count);
+ if (result == NULL)
+ return NULL;
+
+ for (uint32_t i = 0; i < disks_count; i++) {
+ disk = Py_BuildValue(
+ "(s,K)",
+ disks[i].path,
+ disks[i].offset);
+ if (disk == NULL)
+ goto exit_fail;
+
+ /* Steals reference to disk. */
+ if (PyList_SetItem(result, i, disk) != 0)
+ goto exit_fail;
+
+ disk = NULL;
+ }
+
+ return result;
+
+exit_fail:
+ Py_XDECREF(result);
+ Py_XDECREF(disk);
+
+ return NULL;
+}
+
+/* Convert resources array returned from sanlock_inquire() to list of resource
+ * dicts. */
+static PyObject *
+resources_to_list(struct sanlk_resource **res, int res_count)
+{
+ PyObject *result = NULL;
+ PyObject *info = NULL;
+ PyObject *disks = NULL;
+
+ if ((result = PyList_New(res_count)) == NULL)
+ return NULL;
+
+ for (int i = 0; i < res_count; i++) {
+ disks = disks_to_list(res[i]->disks, res[i]->num_disks);
+ if (disks == NULL)
+ goto exit_fail;
+
+ /* Steals reference to disks. */
+ info = Py_BuildValue(
+ "{s:y,s:y,s:k,s:K,s:N}",
+ "lockspace", res[i]->lockspace_name,
+ "resource", res[i]->name,
+ "flags", res[i]->flags,
+ "version", res[i]->lver,
+ "disks", disks);
+ if (info == NULL)
+ goto exit_fail;
+
+ disks = NULL;
+
+ /* Steals reference to info. */
+ if (PyList_SetItem(result, i, info) != 0)
+ goto exit_fail;
+
+ info = NULL;
+ }
+
+ return result;
+
+exit_fail:
+ Py_XDECREF(result);
+ Py_XDECREF(info);
+ Py_XDECREF(disks);
+
+ return NULL;
+}
+
/* register */
PyDoc_STRVAR(pydoc_register, "\
register() -> int\n\
@@ -1062,6 +1144,89 @@ finally:
Py_RETURN_NONE;
}
+/* inquire */
+PyDoc_STRVAR(pydoc_inquire, "\
+inquire(slkfd=fd, pid=owner)\n\
+Return list of resources held by current process (using the slkfd \n\
+argument to specify the sanlock file descriptor) or for another \n\
+process (using the pid argument).\n\
+\n\
+Does not access storage. To learn about resource state on storage,\n\
+use sanlock.read_resource() and sanlock.read_resource_owners().\n\
+\n\
+Arguments\n\
+ slkfd (int): The file descriptor returned from sanlock.register().\n\
+ pid (int): The program pid to query.\n\
+\n\
+Returns\n\
+ List of resource dicts with the following keys:\n\
+ lockspace (bytes): lockspace name\n\
+ resource (bytes): resource name\n\
+ flags (int): resource flags (sanlock.RES_*)\n\
+ version (int): resource version\n\
+ disks (list): list of disk tuples (path, offset)\n\
+");
+
+static PyObject *
+py_inquire(PyObject *self __unused, PyObject *args, PyObject *keywds)
+{
+ int sanlockfd = -1;
+ int pid = -1;
+ char *kwlist[] = {"slkfd", "pid", NULL};
+ int rv = -1;
+
+ /* sanlock_inquire() return values. */
+ int res_count = 0;
+ char *res_state = NULL;
+
+ /* Array of resources parsed from res_state. */
+ struct sanlk_resource **res_arr = NULL;
+
+ /* List of resource dicts. */
+ PyObject *result = NULL;
+
+ if (!PyArg_ParseTupleAndKeywords(
+ args, keywds, "|ii", kwlist, &sanlockfd, &pid)) {
+ return NULL;
+ }
+
+ /* Check if any of the slkfd or pid parameters was given. */
+ if (sanlockfd == -1 && pid == -1) {
+ set_sanlock_error(-EINVAL, "Invalid slkfd and pid values");
+ return NULL;
+ }
+
+ /* Inquire sanlock (gil disabled) */
+ Py_BEGIN_ALLOW_THREADS
+ rv = sanlock_inquire(sanlockfd, pid, 0, &res_count, &res_state);
+ Py_END_ALLOW_THREADS
+
+ if (rv != 0) {
+ set_sanlock_error(rv, "Inquire error");
+ return NULL;
+ }
+
+ if (res_count > 0) {
+ rv = sanlock_state_to_args(res_state, &res_count, &res_arr);
+ if (rv != 0) {
+ /* TODO: Include res_state in the error. */
+ set_sanlock_error(rv, "Error parsing inquire state string");
+ goto finally;
+ }
+ }
+
+ result = resources_to_list(res_arr, res_count);
+
+finally:
+ free(res_state);
+
+ for (int i = 0; i < res_count; i++)
+ free(res_arr[i]);
+ free(res_arr);
+
+ return result;
+}
+
/* release */
PyDoc_STRVAR(pydoc_release, "\
release(lockspace, resource, disks [, slkfd=fd, pid=owner])\n\
@@ -1752,6 +1917,8 @@ sanlock_methods[] = {
METH_VARARGS|METH_KEYWORDS, pydoc_read_resource_owners},
{"acquire", (PyCFunction) py_acquire,
METH_VARARGS|METH_KEYWORDS, pydoc_acquire},
+ {"inquire", (PyCFunction) py_inquire,
+ METH_VARARGS|METH_KEYWORDS, pydoc_inquire},
{"release", (PyCFunction) py_release,
METH_VARARGS|METH_KEYWORDS, pydoc_release},
{"request", (PyCFunction) py_request,
@@ -1850,6 +2017,12 @@ module_init(PyObject* m)
if (PyModule_AddIntConstant(m, "SETEV_ALL_HOSTS", SANLK_SETEV_ALL_HOSTS))
return -1;
+ /* sanlock_inquire() result resource flags */
+ if (PyModule_AddIntConstant(m, "RES_LVER", SANLK_RES_LVER))
+ return -1;
+ if (PyModule_AddIntConstant(m, "RES_SHARED", SANLK_RES_SHARED))
+ return -1;
+
/* Tuples with supported sector size and alignment values */
PyObject *sector = Py_BuildValue("ii", SECTOR_SIZE_512, SECTOR_SIZE_4K);
diff --git a/tests/python_test.py b/tests/python_test.py
index 58a22c7..caf2f3e 100644
--- a/tests/python_test.py
+++ b/tests/python_test.py
@@ -479,6 +479,90 @@ def test_acquire_release_resource(tmpdir, sanlock_daemon, size, offset):
assert owners == []
+@pytest.mark.parametrize("res_name", [
+ "ascii",
+ "\u05d0", # Hebrew Alef
+])
+def test_inquire(tmpdir, sanlock_daemon, res_name):
+ ls_path = str(tmpdir.join("ls_name"))
+ util.create_file(ls_path, MiB)
+
+ res_path = str(tmpdir.join(res_name))
+ util.create_file(res_path, 10 * MiB)
+
+ fd = sanlock.register()
+
+ # No lockspace yet.
+ assert sanlock.inquire(slkfd=fd) == []
+
+ sanlock.write_lockspace(b"ls_name", ls_path, offset=0, iotimeout=1)
+ sanlock.add_lockspace(b"ls_name", 1, ls_path, offset=0, iotimeout=1)
+
+ # No resources created yet.
+ assert sanlock.inquire(slkfd=fd) == []
+
+ resources = [
+ # name, offset, acquire
+ (b"res-0", 0 * MiB, True),
+ (b"res-1", 1 * MiB, False),
+ (b"res-2", 2 * MiB, True),
+ (b"res-8", 8 * MiB, False),
+ (b"res-9", 9 * MiB, True),
+ ]
+
+ for res_name, res_offset, acquire in resources:
+ sanlock.write_resource(b"ls_name", res_name, [(res_path, res_offset)])
+
+ # No resource acquired yet.
+ assert sanlock.inquire(slkfd=fd) == []
+
+ # Acquire resources.
+ for res_name, res_offset, acquire in resources:
+ if acquire:
+ sanlock.acquire(
+ b"ls_name", res_name, [(res_path, res_offset)], slkfd=fd)
+
+ time.sleep(1)
+
+ expected = [
+ {
+ "lockspace": b"ls_name",
+ "resource": b"res-0",
+ "flags": sanlock.RES_LVER,
+ "version": 1,
+ "disks": [(res_path, 0 * MiB)],
+ },
+ {
+ "lockspace": b"ls_name",
+ "resource": b"res-2",
+ "flags": sanlock.RES_LVER,
+ "version": 1,
+ "disks": [(res_path, 2 * MiB)],
+ },
+ {
+ "lockspace": b"ls_name",
+ "resource": b"res-9",
+ "flags": sanlock.RES_LVER,
+ "version": 1,
+ "disks": [(res_path, 9 * MiB)],
+ },
+ ]
+
+    # Check acquired resources using slkfd.
+ assert sanlock.inquire(slkfd=fd) == expected
+
+ # Check acquired resources using pid.
+ assert sanlock.inquire(pid=os.getpid()) == expected
+
+ for res_name, res_offset, acquire in resources:
+ if acquire:
+ sanlock.release(
+ b"ls_name", res_name, [(res_path, res_offset)], slkfd=fd)
+
+    # All resources released.
+ assert sanlock.inquire(slkfd=fd) == []
+
+
@pytest.mark.parametrize("align, sector", [
# Invalid alignment
(KiB, sanlock.SECTOR_SIZE[0]),
--
To stop receiving notification emails like this one, please contact
the administrator of this repository.
2 years
[sanlock] 01/01: tox: Remove python 2 tests, add python 3.7,
3.8 tests
by pagure@pagure.io
This is an automated email from the git hooks/post-receive script.
nsoffer pushed a commit to annotated tag py2-v1
in repository sanlock.
commit 1da60ae07b2618f2ca41348fc8022b03bfd04390
Author: Nir Soffer <nsoffer(a)redhat.com>
AuthorDate: Tue Apr 28 02:28:16 2020 +0300
tox: Remove python 2 tests, add python 3.7, 3.8 tests
Python 2 is dead for a while, and there is no point in running the tests
now. Python 3.7 and 3.8 are available and we want to test them on
travis.
Signed-off-by: Nir Soffer <nsoffer(a)redhat.com>
---
.travis.yml | 4 ++--
tox.ini | 5 ++---
2 files changed, 4 insertions(+), 5 deletions(-)
diff --git a/.travis.yml b/.travis.yml
index 89fb52a..bea805d 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,10 +3,10 @@ dist: xenial
language: python
python:
- - "2.7"
- "3.6"
- "3.7"
- - "3.8-dev"
+ - "3.8"
+ - "3.9-dev"
addons:
apt:
diff --git a/tox.ini b/tox.ini
index 4a28561..af8a6cc 100644
--- a/tox.ini
+++ b/tox.ini
@@ -4,7 +4,7 @@
# and then run "tox" from this directory.
[tox]
-envlist = py27,py36,flake8
+envlist = py{36,37,38},flake8
skipsdist = True
skip_missing_interpreters = True
@@ -18,8 +18,7 @@ whitelist_externals = make
deps =
pytest==4.0
commands =
- py27: make PY_VERSION=2.7 BUILDARGS="--build-lib={envsitepackagesdir}"
- py36: make PY_VERSION=3.6 BUILDARGS="--build-lib={envsitepackagesdir}"
+ py{36,37,38}: make BUILDARGS="--build-lib={envsitepackagesdir}"
pytest {posargs}
[testenv:flake8]
--
To stop receiving notification emails like this one, please contact
the administrator of this repository.
2 years
[sanlock] 02/02: tests: Test handling paths with colons
by pagure@pagure.io
This is an automated email from the git hooks/post-receive script.
nsoffer pushed a commit to annotated tag dump-tests-v1
in repository sanlock.
commit fe06cdc64268fd2637e544d5463d477471b9ac85
Author: Nir Soffer <nsoffer(a)redhat.com>
AuthorDate: Tue Apr 28 04:08:18 2020 +0300
tests: Test handling paths with colons
"sanlock direct dump" supports now escaped colons in path. Add a test to
verify this behaviour. Unfortunately, "sanlock direct init" does not
support that yet.
Signed-off-by: Nir Soffer <nsoffer(a)redhat.com>
---
tests/direct_test.py | 24 ++++++++++++++++++++++++
tests/util.py | 5 +++--
2 files changed, 27 insertions(+), 2 deletions(-)
diff --git a/tests/direct_test.py b/tests/direct_test.py
index bc66526..a7ad014 100644
--- a/tests/direct_test.py
+++ b/tests/direct_test.py
@@ -10,6 +10,7 @@ Test sanlock direct options.
from __future__ import absolute_import
import io
+import os
import struct
from . import constants
@@ -113,3 +114,26 @@ def test_dump_resources_start_before(tmpdir):
['04194304', 'ls_name', 'res_4', '0000000000', '0000', '0000', '0'],
['05242880', 'ls_name', 'res_5', '0000000000', '0000', '0000', '0'],
]
+
+
+def test_path_with_colon(tmpdir):
+ path = str(tmpdir.mkdir("with:colon").join("resources"))
+ size = 8 * MiB
+ util.create_file(path, size)
+
+ # sanlock direct init does not support escaped colons in path.
+ dirname, filename = os.path.split(path)
+ res = "ls_name:res_0:%s:0M" % filename
+ util.sanlock("direct", "init", "-r", res, cwd=dirname)
+
+ # sanlock direct dump supports escaped colons in path.
+ escaped_path = path.replace(":", "\\:")
+ dump = "%s:0:8M" % escaped_path
+ out = util.sanlock("direct", "dump", dump)
+
+ lines = out.decode("utf-8").splitlines()
+ resources = [line.split() for line in lines]
+ assert resources == [
+ ['offset', 'lockspace', 'resource', 'timestamp', 'own', 'gen', 'lver'],
+ ['00000000', 'ls_name', 'res_0', '0000000000', '0000', '0000', '0'],
+ ]
diff --git a/tests/util.py b/tests/util.py
index 12f2702..df36ebb 100644
--- a/tests/util.py
+++ b/tests/util.py
@@ -82,14 +82,15 @@ def wait_for_daemon(timeout):
s.close()
-def sanlock(*args):
+def sanlock(*args, cwd=None):
"""
Run sanlock returning the process stdout, or raising
util.CommandError on failures.
"""
cmd = [SANLOCK]
cmd.extend(args)
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p = subprocess.Popen(
+ cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
out, err = p.communicate()
if p.returncode:
raise CommandError(cmd, p.returncode, out, err)
--
To stop receiving notification emails like this one, please contact
the administrator of this repository.
2 years
[sanlock] 01/02: tests: Tests dumping lockspace and resources
by pagure@pagure.io
This is an automated email from the git hooks/post-receive script.
nsoffer pushed a commit to annotated tag dump-tests-v1
in repository sanlock.
commit 8e3eb3dcaa153afdb22787d7143ee930d1ad4560
Author: Nir Soffer <nsoffer(a)redhat.com>
AuthorDate: Tue Apr 28 03:33:49 2020 +0300
tests: Tests dumping lockspace and resources
Add tests for dumping:
- empty lockspace
- resources with a hole
- start before first resource
Signed-off-by: Nir Soffer <nsoffer(a)redhat.com>
---
tests/direct_test.py | 66 +++++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 65 insertions(+), 1 deletion(-)
diff --git a/tests/direct_test.py b/tests/direct_test.py
index 579f228..bc66526 100644
--- a/tests/direct_test.py
+++ b/tests/direct_test.py
@@ -34,7 +34,27 @@ def test_init_lockspace(tmpdir):
util.check_guard(str(path), size)
-def test_init_resource(tmpdir, sanlock_daemon):
+def test_dump_lockspace_empty(tmpdir):
+ path = tmpdir.join("lockspace")
+ size = MiB
+ util.create_file(str(path), size)
+
+ lockspace = "name:1:%s:0" % path
+ util.sanlock("direct", "init", "-s", lockspace)
+
+ dump = "%s:0:1M" % path
+ out = util.sanlock("direct", "dump", dump)
+
+ lines = out.decode("utf-8").splitlines()
+ spaces = [line.split() for line in lines]
+
+ # Empty lockspace has no hosts.
+ assert spaces == [
+ ['offset', 'lockspace', 'resource', 'timestamp', 'own', 'gen', 'lver']
+ ]
+
+
+def test_init_resource(tmpdir):
path = tmpdir.join("resources")
size = MiB
util.create_file(str(path), size)
@@ -49,3 +69,47 @@ def test_init_resource(tmpdir, sanlock_daemon):
# TODO: check more stuff here...
util.check_guard(str(path), size)
+
+
+def test_dump_resources(tmpdir):
+ path = tmpdir.join("resources")
+ size = 8 * MiB
+ util.create_file(str(path), size)
+
+ # Write 2 resources with a hole between them.
+ for i in [0, 2]:
+ res = "ls_name:res_%d:%s:%dM" % (i, path, i)
+ util.sanlock("direct", "init", "-r", res)
+
+ dump = "%s:0:8M" % path
+ out = util.sanlock("direct", "dump", dump)
+
+ lines = out.decode("utf-8").splitlines()
+ resources = [line.split() for line in lines]
+ assert resources == [
+ ['offset', 'lockspace', 'resource', 'timestamp', 'own', 'gen', 'lver'],
+ ['00000000', 'ls_name', 'res_0', '0000000000', '0000', '0000', '0'],
+ ['02097152', 'ls_name', 'res_2', '0000000000', '0000', '0000', '0'],
+ ]
+
+
+def test_dump_resources_start_before(tmpdir):
+ path = tmpdir.join("resources")
+ size = 8 * MiB
+ util.create_file(str(path), size)
+
+ # Write 2 resources at middle.
+ for i in [4, 5]:
+ res = "ls_name:res_%d:%s:%dM" % (i, path, i)
+ util.sanlock("direct", "init", "-r", res)
+
+ dump = "%s:2M:8M" % path
+ out = util.sanlock("direct", "dump", dump)
+
+ lines = out.decode("utf-8").splitlines()
+ resources = [line.split() for line in lines]
+ assert resources == [
+ ['offset', 'lockspace', 'resource', 'timestamp', 'own', 'gen', 'lver'],
+ ['04194304', 'ls_name', 'res_4', '0000000000', '0000', '0000', '0'],
+ ['05242880', 'ls_name', 'res_5', '0000000000', '0000', '0000', '0'],
+ ]
--
To stop receiving notification emails like this one, please contact
the administrator of this repository.
2 years