master - io: keep 64b arithmetic
by Zdenek Kabelac
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=6b48868cf0754b13e4e...
Commit: 6b48868cf0754b13e4efaaaec33cf8c9deba2d40
Parent: 261e6c3df69082490a685f0d0599e88139b9a927
Author: Zdenek Kabelac <zkabelac(a)redhat.com>
AuthorDate: Sun Feb 25 22:32:14 2018 +0100
Committer: Zdenek Kabelac <zkabelac(a)redhat.com>
CommitterDate: Wed Feb 28 21:05:18 2018 +0100
io: keep 64b arithmetic
Widen to 64b arithmetic from start.
---
lib/device/dev-io.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/lib/device/dev-io.c b/lib/device/dev-io.c
index 31506e8..50b1ba8 100644
--- a/lib/device/dev-io.c
+++ b/lib/device/dev-io.c
@@ -117,7 +117,7 @@ int dev_async_setup(struct cmd_context *cmd)
int r;
_aio_max = find_config_tree_int(cmd, devices_aio_max_CFG, NULL);
- _aio_memory_max = find_config_tree_int(cmd, devices_aio_memory_CFG, NULL) * 1024 * 1024;
+ _aio_memory_max = find_config_tree_int(cmd, devices_aio_memory_CFG, NULL) * INT64_C(1024 * 1024);
/* Threshold is zero? */
if (!_aio_max || !_aio_memory_max) {
6 years, 1 month
master - coverity: missing free on error path
by Zdenek Kabelac
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=9bfc8881cb712a6b164...
Commit: 9bfc8881cb712a6b164f08ca3d70ec99745e6c22
Parent: 32bcdd90ae7c847e7b4fc5cc5184c581a4f6484a
Author: Zdenek Kabelac <zkabelac(a)redhat.com>
AuthorDate: Sun Feb 25 16:17:42 2018 +0100
Committer: Zdenek Kabelac <zkabelac(a)redhat.com>
CommitterDate: Wed Feb 28 21:05:18 2018 +0100
coverity: missing free on error path
---
daemons/dmfilemapd/dmfilemapd.c | 6 ++++--
1 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/daemons/dmfilemapd/dmfilemapd.c b/daemons/dmfilemapd/dmfilemapd.c
index aa50242..7519799 100644
--- a/daemons/dmfilemapd/dmfilemapd.c
+++ b/daemons/dmfilemapd/dmfilemapd.c
@@ -802,7 +802,7 @@ bad:
return 1;
}
-static const char * _mode_names[] = {
+static const char * const _mode_names[] = {
"inode",
"path"
};
@@ -827,8 +827,10 @@ int main(int argc, char **argv)
"mode=%s, path=%s", fm.fd, fm.group_id,
_mode_names[fm.mode], fm.path);
- if (!_foreground && !_daemonise(&fm))
+ if (!_foreground && !_daemonise(&fm)) {
+ dm_free(fm.path);
return 1;
+ }
return _dmfilemapd(&fm);
}
6 years, 1 month
master - raid: add free for error path
by Zdenek Kabelac
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=261e6c3df69082490a6...
Commit: 261e6c3df69082490a685f0d0599e88139b9a927
Parent: 9bfc8881cb712a6b164f08ca3d70ec99745e6c22
Author: Zdenek Kabelac <zkabelac(a)redhat.com>
AuthorDate: Sun Feb 25 16:20:30 2018 +0100
Committer: Zdenek Kabelac <zkabelac(a)redhat.com>
CommitterDate: Wed Feb 28 21:05:18 2018 +0100
raid: add free for error path
A recent patch forgot to release the now-allocated 'dso' on the error path.
---
lib/raid/raid.c | 10 +++++++---
1 files changed, 7 insertions(+), 3 deletions(-)
diff --git a/lib/raid/raid.c b/lib/raid/raid.c
index 31a0ac0..269a30c 100644
--- a/lib/raid/raid.c
+++ b/lib/raid/raid.c
@@ -655,6 +655,7 @@ int init_multiple_segtypes(struct cmd_context *cmd, struct segtype_library *segl
char *dso = NULL;
unsigned i;
uint64_t monitored = 0;
+ int r = 1;
#ifdef DEVMAPPER_SUPPORT
# ifdef DMEVENTD
@@ -667,11 +668,14 @@ int init_multiple_segtypes(struct cmd_context *cmd, struct segtype_library *segl
for (i = 0; i < DM_ARRAY_SIZE(_raid_types); ++i)
if ((segtype = _init_raid_segtype(cmd, &_raid_types[i], dso, monitored)) &&
- !lvm_register_segtype(seglib, segtype))
+ !lvm_register_segtype(seglib, segtype)) {
/* segtype is already destroyed */
- return_0;
+ stack;
+ r = 0;
+ break;
+ }
dm_free(dso);
- return 1;
+ return r;
}
6 years, 1 month
master - tests: check vgsplit thin-data and ext.origin
by Marian Csontos
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=32bcdd90ae7c847e7b4...
Commit: 32bcdd90ae7c847e7b4fc5cc5184c581a4f6484a
Parent: 8e5305f630be818e677572e4c43d2c728a89f54f
Author: Marian Csontos <mcsontos(a)redhat.com>
AuthorDate: Tue Feb 27 13:45:39 2018 +0100
Committer: Marian Csontos <mcsontos(a)redhat.com>
CommitterDate: Tue Feb 27 14:37:47 2018 +0100
tests: check vgsplit thin-data and ext.origin
---
test/shell/vgsplit-thin.sh | 20 ++++++++++++++++++++
1 files changed, 20 insertions(+), 0 deletions(-)
diff --git a/test/shell/vgsplit-thin.sh b/test/shell/vgsplit-thin.sh
index 0e4572e..a2d0a0b 100644
--- a/test/shell/vgsplit-thin.sh
+++ b/test/shell/vgsplit-thin.sh
@@ -44,3 +44,23 @@ lvs -a -o+devices $vg1 $vg2
vgmerge $vg1 $vg2
vgremove -ff $vg1
+
+# Test vgsplit with ext.origin:
+if aux have_thin 1 5 0; then
+vgcreate "$vg1" "${DEVICES[@]}"
+lvcreate -T -L8M $vg1/pool1 -V10M -n $lv1 "$dev1" "$dev2"
+lvcreate -l1 -an -pr -n $lv2 $vg1 "$dev3"
+lvcreate -s $vg1/$lv2 -n $lv3 --thinpool $vg1/pool1
+lvcreate -l1 -n $lv4 $vg1 "$dev4"
+vgchange -an $vg1
+
+# Can not split ext.origin from thin-data:
+not vgsplit $vg1 $vg2 "$dev1" "$dev2"
+not vgsplit $vg1 $vg2 "$dev3"
+
+vgsplit $vg1 $vg2 "$dev1" "$dev2" "$dev3"
+
+vgmerge $vg1 $vg2
+
+vgremove -ff $vg1
+fi
6 years, 2 months
master - tests: correct usage of pipe
by Zdenek Kabelac
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=8e5305f630be818e677...
Commit: 8e5305f630be818e677572e4c43d2c728a89f54f
Parent: e7f1329cae118ccbfded213eee4895d99d79120b
Author: Zdenek Kabelac <zkabelac(a)redhat.com>
AuthorDate: Sat Feb 17 11:24:32 2018 +0100
Committer: Zdenek Kabelac <zkabelac(a)redhat.com>
CommitterDate: Mon Feb 19 16:45:10 2018 +0100
tests: correct usage of pipe
This is somewhat tricky - for test suite we keep using
'set -e -o pipefail' - the effect here is - we get error report
from any 'failing' command in whole pipeline - thus when something
like this: 'lvs | head -1' is used - and 'head' finishes before
leading 'lvs' is done - it receives SIGPIPE and exits with error,
and a somewhat misleading error occasionally gets reported depending
on the speed of commands.
For this case we have to avoid using standard pipes and rather
switch to using streamed results with temporary output file.
This is all nicely handled with bash feature '< <()'.
For more info:
https://stackoverflow.com/questions/41516177/bash-zcat-head-causes-pipefail
---
test/lib/get.sh | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/test/lib/get.sh b/test/lib/get.sh
index e7139dc..afc10bc 100644
--- a/test/lib/get.sh
+++ b/test/lib/get.sh
@@ -47,7 +47,7 @@ lv_field() {
lv_first_seg_field() {
local r
- r=$(lvs --config 'log{prefix=""}' --noheadings -o "$2" "${@:3}" "$1" | head -1)
+ r=$(head -1 < <(lvs --config 'log{prefix=""}' --unbuffered --noheadings -o "$2" "${@:3}" "$1"))
trim_ "$r"
}
@@ -74,7 +74,7 @@ lv_field_lv_() {
lv_tree_devices_() {
local lv="$1/$2"
local type
- type=$(lv_field "$lv" segtype -a --unbuffered | head -n 1)
+ type=$(lv_first_seg_field "$lv" segtype -a)
#local orig
#orig=$(lv_field_lv_ "$lv" origin)
# FIXME: should we count in also origins ?
6 years, 2 months
master - locking: move cache dropping to primary locking code
by Zdenek Kabelac
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=c3bb2b29d441f27d7e1...
Commit: c3bb2b29d441f27d7e1d88f71d934ba8c955b26d
Parent: e87fa7c9cef53013efa46033ad037822c70c1bb9
Author: Zdenek Kabelac <zkabelac(a)redhat.com>
AuthorDate: Mon Feb 19 16:31:52 2018 +0100
Committer: Zdenek Kabelac <zkabelac(a)redhat.com>
CommitterDate: Mon Feb 19 16:45:05 2018 +0100
locking: move cache dropping to primary locking code
While 'file-locking' code always dropped cached VG before
lock was taken - other locking types actually missed this.
So while the cache dropping has been implemented for e.g. clvmd,
a command actually running in a cluster kept using the cache even
when the lock has been e.g. dropped and taken again.
This rather 'hard-to-hit' error was noticeable in some
tests running in cluster where content of PV has been
changed (metadata-balance.sh)
Fix the code by moving cache dropping directly into the lock_vol() function.
TODO: it's kind of strange we should ever need drop_cached_metadata()
used in several places - this all should happen automatically;
thus some further thinking here is likely needed.
---
WHATS_NEW | 1 +
lib/locking/file_locking.c | 5 +----
lib/locking/locking.c | 7 +++++++
3 files changed, 9 insertions(+), 4 deletions(-)
diff --git a/WHATS_NEW b/WHATS_NEW
index b11de8c..5791930 100644
--- a/WHATS_NEW
+++ b/WHATS_NEW
@@ -1,5 +1,6 @@
Version 2.02.178 -
=====================================
+ Ensure cluster commands drop their device cache before locking VG.
Do not report LV as remotely active when it's locally exclusive in cluster.
Add deprecate messages for usage of mirrors with mirrorlog.
Separate reporting of monitoring status and error status.
diff --git a/lib/locking/file_locking.c b/lib/locking/file_locking.c
index 245892e..517a64f 100644
--- a/lib/locking/file_locking.c
+++ b/lib/locking/file_locking.c
@@ -60,11 +60,8 @@ static int _file_lock_resource(struct cmd_context *cmd, const char *resource,
return_0;
break;
case LCK_VG:
- if (!strcmp(resource, VG_SYNC_NAMES)) {
+ if (!strcmp(resource, VG_SYNC_NAMES))
fs_unlock();
- } else if (strcmp(resource, VG_GLOBAL))
- /* Skip cache refresh for VG_GLOBAL - the caller handles it */
- lvmcache_drop_metadata(resource, 0);
/* LCK_CACHE does not require a real lock */
if (flags & LCK_CACHE)
diff --git a/lib/locking/locking.c b/lib/locking/locking.c
index 1a3ce9d..d61aa35 100644
--- a/lib/locking/locking.c
+++ b/lib/locking/locking.c
@@ -336,6 +336,13 @@ int lock_vol(struct cmd_context *cmd, const char *vol, uint32_t flags, const str
!lvmcache_verify_lock_order(vol))
return_0;
+ if ((flags == LCK_VG_DROP_CACHE) ||
+ (strcmp(vol, VG_GLOBAL) && strcmp(vol, VG_SYNC_NAMES))) {
+ /* Skip dropping cache for internal VG names #global, #sync_names */
+ log_debug_locking("Dropping cache for %s.", vol);
+ lvmcache_drop_metadata(vol, 0);
+ }
+
/* Lock VG to change on-disk metadata. */
/* If LVM1 driver knows about the VG, it can't be accessed. */
if (!check_lvm1_vg_inactive(cmd, vol))
6 years, 2 months
master - debug: capture internal error for too long resource name
by Zdenek Kabelac
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=e7f1329cae118ccbfde...
Commit: e7f1329cae118ccbfded213eee4895d99d79120b
Parent: c3bb2b29d441f27d7e1d88f71d934ba8c955b26d
Author: Zdenek Kabelac <zkabelac(a)redhat.com>
AuthorDate: Mon Feb 19 15:30:55 2018 +0100
Committer: Zdenek Kabelac <zkabelac(a)redhat.com>
CommitterDate: Mon Feb 19 16:45:10 2018 +0100
debug: capture internal error for too long resource name
Should never happen, so just put in internal error instead of silently
passing some shortened resource name.
---
lib/locking/locking.c | 8 +++++---
1 files changed, 5 insertions(+), 3 deletions(-)
diff --git a/lib/locking/locking.c b/lib/locking/locking.c
index d61aa35..1e1be56 100644
--- a/lib/locking/locking.c
+++ b/lib/locking/locking.c
@@ -264,7 +264,7 @@ static int _lock_vol(struct cmd_context *cmd, const char *resource,
}
if ((is_orphan_vg(resource) || is_global_vg(resource)) && (flags & LCK_CACHE)) {
- log_error(INTERNAL_ERROR "P_%s referenced", resource);
+ log_error(INTERNAL_ERROR "P_%s referenced.", resource);
goto out;
}
@@ -358,8 +358,10 @@ int lock_vol(struct cmd_context *cmd, const char *vol, uint32_t flags, const str
return 0;
}
- strncpy(resource, vol, sizeof(resource) - 1);
- resource[sizeof(resource) - 1] = '\0';
+ if (!dm_strncpy(resource, vol, sizeof(resource))) {
+ log_error(INTERNAL_ERROR "Resource name %s is too long.", vol);
+ return 0;
+ }
if (!_lock_vol(cmd, resource, flags, lv_op, lv))
return_0;
6 years, 2 months
master - sanlock: set proper return value
by Zdenek Kabelac
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=e87fa7c9cef53013efa...
Commit: e87fa7c9cef53013efa46033ad037822c70c1bb9
Parent: 1671b83585a6ee49448376c20834a9e89936a7f3
Author: Zdenek Kabelac <zkabelac(a)redhat.com>
AuthorDate: Sat Feb 17 11:23:19 2018 +0100
Committer: Zdenek Kabelac <zkabelac(a)redhat.com>
CommitterDate: Mon Feb 19 16:44:10 2018 +0100
sanlock: set proper return value
In the last patch, one error path missed assigning the correct return value.
Assign it directly to 'ret' as the log_error was already reported.
---
daemons/lvmlockd/lvmlockd-sanlock.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/daemons/lvmlockd/lvmlockd-sanlock.c b/daemons/lvmlockd/lvmlockd-sanlock.c
index dd88d24..a91218b 100644
--- a/daemons/lvmlockd/lvmlockd-sanlock.c
+++ b/daemons/lvmlockd/lvmlockd-sanlock.c
@@ -1121,7 +1121,7 @@ int lm_prepare_lockspace_sanlock(struct lockspace *ls)
goto fail;
}
- if ((rv = build_dm_path(disk_path, SANLK_PATH_LEN, ls->vg_name, lock_lv_name)))
+ if ((ret = build_dm_path(disk_path, SANLK_PATH_LEN, ls->vg_name, lock_lv_name)))
goto fail;
/*
6 years, 2 months
master - doc: Fixing VDO document
by Marian Csontos
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=1671b83585a6ee49448...
Commit: 1671b83585a6ee49448376c20834a9e89936a7f3
Parent: f5401fbd34371dfed74f981eb1d4c5b4b3220cc2
Author: Marian Csontos <mcsontos(a)redhat.com>
AuthorDate: Fri Feb 16 17:09:40 2018 +0100
Committer: Marian Csontos <mcsontos(a)redhat.com>
CommitterDate: Fri Feb 16 17:10:54 2018 +0100
doc: Fixing VDO document
---
doc/vdo.md | 59 +++++++++++++++++++++++++++++++++++++++--------------------
1 files changed, 39 insertions(+), 20 deletions(-)
diff --git a/doc/vdo.md b/doc/vdo.md
index a85518b..5c5a33c 100644
--- a/doc/vdo.md
+++ b/doc/vdo.md
@@ -22,17 +22,25 @@ Usual limitations apply:
- Never layer LUKS over another LUKS - it makes no sense.
- LUKS is better over the raids, than under.
+Devices which are not best suitable as backing device:
+
+- thin volumes - at the moment it is not possible to take snapshot of active VDO volume on top of thin volume.
+
### Using VDO as a PV:
-1. under tpool
+1. under tdata
- The best fit - it will deduplicate additional redundancies among all
snapshots and will reduce the footprint.
- Risks: Resize! dmevent will not be able to handle resizing of tpool ATM.
2. under corig
- - Cache fits better under VDO device - it will reduce amount of data, and
- deduplicate, so there should be more hits.
- This is useful to keep the most frequently used data in cache
- uncompressed (if that happens to be a bottleneck.)
+ uncompressed or without deduplication if that happens to be a bottleneck.
+ - Cache may fit better under VDO device, depending on compressibility and
+ amount of duplicates, as
+ - compression will reduce amount of data, thus effectively increasing
+ size of cache,
+ - and deduplication may emphasize hotspots.
+ - Performance testing of your particular workload is strongly recommended.
3. under (multiple) linear LVs - e.g. used for VMs.
### And where VDO does not fit:
@@ -50,36 +58,47 @@ Usual limitations apply:
- under snapshot CoW device - when there are multiple of those it could deduplicate
+## Development
+
### Things to decide
-- under integrity devices - it should work - mostly for data
- - hash is not compressible and unique - it makes sense to have separate imeta and idata volumes for integrity devices
+- under integrity devices
+ - VDO should work well for data blocks,
+ - but hashes are mostly unique and not compressible - were it possible it
+ would make sense to have separate imeta and idata volumes for integrity
+ devices.
### Future Integration of VDO into LVM:
One issue is using both LUKS and RAID under VDO. We have two options:
- use mdadm x LUKS x VDO+LV
-- use LV RAID x LUKS x VDO+LV - still requiring recursive LVs.
+- use LV RAID x LUKS x VDO+LV
+
+In both cases dmeventd will not be able to resize the volume at the moment.
-Another issue is duality of VDO - it is a top level LV but it can be seen as a "pool" for multiple devices.
+Another issue is duality of VDO - it can be used as a top level LV (with a
+filesystem on top) but it can be used as "pool" for multiple devices too.
-- This is one usecase which could not be handled by LVM at the moment.
-- Size of the VDO is its physical size and virtual size - just like tpool.
- - same problems with virtual vs physical size - it can get full, without exposing it fo a FS
+This will be solved in similar way thin pools allow multiple volumes.
-Another possible RFE is to split data and metadata:
+Also VDO, has two sizes - its physical size and virtual size - and when
+overprovisioning, just like tpool, we face same problems - VDO can get full,
+without exposing it to a FS. dmeventd monitoring will be needed.
-- e.g. keep data on HDD and metadata on SSD
+Another possible RFE is to split data and metadata - keep data on HDD and metadata on SSD.
## Issues / Testing
- fstrim/discard pass down - does it work with VDO?
-- VDO can run in synchronous vs. asynchronous mode
- - synchronous for devices where write is safe after it is confirmed. Some devices are lying.
- - asynchronous for devices requiring flush
-- multiple devices under VDO - need to find common options
-- pvmove - changing characteristics of underlying device
-- autoactivation during boot
- - Q: can we use VDO for RootFS?
+- VDO can run in synchronous vs. asynchronous mode:
+ - synchronous for devices where write is safe after it is confirmed. Some
+ devices are lying.
+ - asynchronous for devices requiring flush.
+- Multiple devices under VDO - need to find and expose common properties, or
+ not allow grouping them together. (This is same for all volumes with more
+ physical devices below.)
+- pvmove changing characteristics of underlying device.
+- autoactivation during boot?
+ - Q: can we use VDO for RootFS? Dracut!
6 years, 2 months
master - pvmove: enhance accepted states of active LVs
by Zdenek Kabelac
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=552e60b3a1e35329a47...
Commit: 552e60b3a1e35329a47d6112c548ada124b5a4e3
Parent: a2d2fe3a8cf840fcfcd23fb0e706c3699b79b5fa
Author: Zdenek Kabelac <zkabelac(a)redhat.com>
AuthorDate: Thu Feb 15 13:39:58 2018 +0100
Committer: Zdenek Kabelac <zkabelac(a)redhat.com>
CommitterDate: Thu Feb 15 13:55:38 2018 +0100
pvmove: enhance accepted states of active LVs
Improve pvmove to accept 'locally' active LVs together with
exclusive active LVs.
In the 1st. phase it now recognizes whether exclusive pvmove is needed.
For this case only 'exclusively' or 'locally-only without remote
activation state' LVs are acceptable and all others are skipped.
During build-up of pvmove 'activation' steps are taken, so if
there is any problem we can now 'skip' LVs from pvmove operation
rather than giving up the whole pvmove operation.
Also when pvmove is restarted, recognize need of exclusive pvmove,
and use it whenever there is an LV that requires exclusive activation.
---
tools/pvmove.c | 97 +++++++++++++++++++++++++++++++-------------------------
1 files changed, 54 insertions(+), 43 deletions(-)
diff --git a/tools/pvmove.c b/tools/pvmove.c
index cbd5cb8..2a26a10 100644
--- a/tools/pvmove.c
+++ b/tools/pvmove.c
@@ -340,8 +340,8 @@ static struct logical_volume *_set_up_pvmove_lv(struct cmd_context *cmd,
uint32_t log_count = 0;
int lv_found = 0;
int lv_skipped = 0;
- int lv_active_count = 0;
- int lv_exclusive_count = 0;
+ int needs_exclusive = *exclusive;
+ const struct logical_volume *holder;
/* FIXME Cope with non-contiguous => splitting existing segments */
if (!(lv_mirr = lv_create_empty("pvmove%d", NULL,
@@ -392,8 +392,13 @@ static struct logical_volume *_set_up_pvmove_lv(struct cmd_context *cmd,
return NULL;
}
- if (seg_is_raid(first_seg(lv)) ||
- seg_is_mirrored(first_seg(lv))) {
+ seg = first_seg(lv);
+
+ /* Presence of exclusive LV decides whether pvmove must be also exclusive */
+ if ((seg_only_exclusive(seg) || lv_is_origin(lv) || lv_is_cow(lv)))
+ needs_exclusive = 1;
+
+ if (seg_is_raid(seg) || seg_is_mirrored(seg)) {
dm_list_init(&trim_list);
if (!get_pv_list_for_lv(vg->cmd->mem, lv, &trim_list))
@@ -432,6 +437,14 @@ static struct logical_volume *_set_up_pvmove_lv(struct cmd_context *cmd,
lv_found = 1;
}
+ seg = first_seg(lv);
+
+ if (seg_is_cache(seg) || seg_is_cache_pool(seg) ||
+ seg_is_mirrored(seg) || seg_is_raid(seg) ||
+ seg_is_snapshot(seg) ||
+ seg_is_thin(seg) || seg_is_thin_pool(seg))
+ continue; /* bottom-level LVs only... */
+
if (!lv_is_on_pvs(lv, source_pvl))
continue;
@@ -441,47 +454,36 @@ static struct logical_volume *_set_up_pvmove_lv(struct cmd_context *cmd,
continue;
}
- if (vg_is_clustered(vg) && lv_is_visible(lv)) {
- if (lv_is_active_exclusive_locally(lv)) {
- if (lv_active_count) {
- log_error("Cannot move in clustered VG %s "
- "if some LVs are activated "
- "exclusively while others don't.",
- vg->name);
- return NULL;
- }
-
- lv_exclusive_count++;
- } else if (lv_is_active(lv)) {
- if (seg_only_exclusive(first_seg(lv))) {
- lv_skipped = 1;
- log_print_unless_silent("Skipping LV %s which is active, "
- "but not locally exclusively.",
- display_lvname(lv));
- continue;
- }
-
- if (*exclusive) {
- log_error("Cannot move in clustered VG %s, "
- "clustered mirror (cmirror) not detected "
- "and LVs are activated non-exclusively.",
- vg->name);
- return NULL;
- }
-
- lv_active_count++;
- }
- }
+ holder = lv_lock_holder(lv);
- seg = first_seg(lv);
- if (seg_is_raid(seg) || seg_is_mirrored(seg) ||
- seg_is_cache(seg) || seg_is_cache_pool(seg) ||
- seg_is_thin(seg) || seg_is_thin_pool(seg))
- /*
- * Pass over top-level LVs - they were handled.
- * Allow sub-LVs to proceed.
+ if (needs_exclusive) {
+ /* With exclusive pvmove skip LV when:
+ * - is active remotely
+ * - is not active locally and cannot be activated exclusively locally
+ *
+ * Note: lvm2 can proceed with exclusive pvmove for 'just' locally active LVs
+ * in the case it's NOT active anywhere else, since LOCKED LVs cannot be
+ * later activated by user.
*/
+ if (lv_is_active_remotely(holder) ||
+ (!lv_is_active_locally(holder) && !activate_lv_excl_local(cmd, holder))) {
+ lv_skipped = 1;
+ log_print_unless_silent("Skipping LV %s which is not locally exclusive%s.",
+ display_lvname(lv),
+ /* Report missing cmirrord cases that matterd.
+ * With exclusive LV types cmirrord would not help. */
+ (*exclusive &&
+ !lv_is_origin(holder) &&
+ !seg_only_exclusive(first_seg(holder))) ?
+ " and clustered mirror (cmirror) not detected" : "");
+ continue;
+ }
+ } else if (!activate_lv(cmd, holder)) {
+ lv_skipped = 1;
+ log_print_unless_silent("Skipping LV %s which cannot be activated.",
+ display_lvname(lv));
continue;
+ }
if (!_insert_pvmove_mirrors(cmd, lv_mirr, source_pvl, lv,
*lvs_changed))
@@ -517,7 +519,7 @@ static struct logical_volume *_set_up_pvmove_lv(struct cmd_context *cmd,
return NULL;
}
- if (lv_exclusive_count)
+ if (needs_exclusive)
*exclusive = 1;
return lv_mirr;
@@ -600,6 +602,8 @@ static int _pvmove_setup_single(struct cmd_context *cmd,
struct dm_list *lvs_changed;
struct logical_volume *lv_mirr;
struct logical_volume *lv = NULL;
+ struct lv_list *lvl;
+ const struct logical_volume *lvh;
const char *pv_name = pv_dev_name(pv);
unsigned flags = PVMOVE_FIRST_TIME;
unsigned exclusive;
@@ -661,6 +665,13 @@ static int _pvmove_setup_single(struct cmd_context *cmd,
goto out;
}
+ dm_list_iterate_items(lvl, lvs_changed) {
+ lvh = lv_lock_holder(lvl->lv);
+ /* Exclusive LV decides whether pvmove must be also exclusive */
+ if (lv_is_origin(lvh) || seg_only_exclusive(first_seg(lvh)))
+ exclusive = 1;
+ }
+
/* Ensure mirror LV is active */
if (!_activate_lv(cmd, lv_mirr, exclusive)) {
log_error("ABORTING: Temporary mirror activation failed.");
6 years, 2 months