Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=2a7c2539c63f72d6…
Commit: 2a7c2539c63f72d6c50df54f53271e567fa948ee
Parent: 500fd8b9bfc6f15d2417c829bce33ea4768261b8
Author: Peter Rajnoha <prajnoha(a)redhat.com>
AuthorDate: Tue Jul 21 09:54:20 2015 +0200
Committer: Peter Rajnoha <prajnoha(a)redhat.com>
CommitterDate: Tue Jul 21 09:54:20 2015 +0200
wiping: ignore errors during detection if use_blkid_wiping=1 and --force is used
libblkid may return the list of signatures found, but it may not
provide offset and size for each signature detected. This may
happen when signatures are mixed up or when several, possibly
overlapping, signatures are found.
Make lvm commands pass if such a situation happens and we're using
--force (or any stronger force method).
For example:
$ pvcreate /dev/sda1
Failed to get offset of the xfs_external_log signature on /dev/sda1.
1 existing signature left on the device.
Aborting pvcreate on /dev/sda1.
$ pvcreate --force /dev/sda1
Failed to get offset of the xfs_external_log signature on /dev/sda1.
Physical volume "/dev/sda1" successfully created
---
WHATS_NEW | 1 +
lib/device/dev-type.c | 8 ++++----
2 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/WHATS_NEW b/WHATS_NEW
index 3cd9111..22a0233 100644
--- a/WHATS_NEW
+++ b/WHATS_NEW
@@ -1,5 +1,6 @@
Version 2.02.126 -
================================
+ Ignore errors during detection if use_blkid_wiping=1 and --force is used.
Recognise DM_ABORT_ON_INTERNAL_ERRORS env var override in lvm logging fn.
Fix alloc segfault when extending LV with fewer stripes than in first seg.
Fix handling of cache policy name.
diff --git a/lib/device/dev-type.c b/lib/device/dev-type.c
index 82e37e0..f9d82b0 100644
--- a/lib/device/dev-type.c
+++ b/lib/device/dev-type.c
@@ -545,20 +545,20 @@ static int _blkid_wipe(blkid_probe probe, struct device *dev, const char *name,
return 2;
if (blkid_probe_lookup_value(probe, "SBMAGIC_OFFSET", &offset, NULL)) {
log_error(_msg_failed_offset, type, name);
- return 0;
+ return (force < DONT_PROMPT) ? 0 : 2;
}
if (blkid_probe_lookup_value(probe, "SBMAGIC", &magic, &len)) {
log_error(_msg_failed_length, type, name);
- return 0;
+ return (force < DONT_PROMPT) ? 0 : 2;
}
} else if (!blkid_probe_lookup_value(probe, "PTTYPE", &type, NULL)) {
if (blkid_probe_lookup_value(probe, "PTMAGIC_OFFSET", &offset, NULL)) {
log_error(_msg_failed_offset, type, name);
- return 0;
+ return (force < DONT_PROMPT) ? 0 : 2;
}
if (blkid_probe_lookup_value(probe, "PTMAGIC", &magic, &len)) {
log_error(_msg_failed_length, type, name);
- return 0;
+ return (force < DONT_PROMPT) ? 0 : 2;
}
usage = "partition table";
} else
Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=b4be988732e909d3…
Commit: b4be988732e909d34ae7b4868bf30cf44c087518
Parent: b785a50da415cd8ee48e971f720640457514032d
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Fri Jul 17 15:13:22 2015 -0500
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Fri Jul 17 15:35:34 2015 -0500
vgchange/lvchange: enforce the shared VG lock from lvmlockd
The vgchange/lvchange activation commands read the VG, and
don't write it, so they acquire a shared VG lock from lvmlockd.
When other commands fail to acquire a shared VG lock from
lvmlockd, a warning is printed and they continue without it.
(Without it, the VG metadata they display from lvmetad may
not be up to date.)
vgchange/lvchange -a shouldn't continue without the shared
lock for a couple of reasons:
. Usually they will just continue on and fail to acquire the
LV locks for activation, so continuing is pointless.
. More importantly, without the sh VG lock, the VG metadata
used by the command may be stale, and the LV locks shown
in the VG metadata may no longer be current. In the
case of sanlock, this would result in odd, unpredictable
errors when lvmlockd doesn't find the expected lock on
disk. In the case of dlm, the invalid LV lock could be
granted for the non-existing LV.
The solution is to not continue after the shared lock fails,
in the same way that a command fails if an exclusive lock fails.
---
lib/commands/toolcontext.h | 1 +
lib/metadata/metadata.c | 2 +-
tools/lvchange.c | 4 +++-
tools/vgchange.c | 4 +++-
4 files changed, 8 insertions(+), 3 deletions(-)
diff --git a/lib/commands/toolcontext.h b/lib/commands/toolcontext.h
index 283783f..4e8998d 100644
--- a/lib/commands/toolcontext.h
+++ b/lib/commands/toolcontext.h
@@ -107,6 +107,7 @@ struct cmd_context {
unsigned lockd_vg_disable:1;
unsigned lockd_lv_disable:1;
unsigned lockd_vg_default_sh:1;
+ unsigned lockd_vg_enforce_sh:1;
struct dev_types *dev_types;
diff --git a/lib/metadata/metadata.c b/lib/metadata/metadata.c
index 56ed3fb..8757331 100644
--- a/lib/metadata/metadata.c
+++ b/lib/metadata/metadata.c
@@ -4803,7 +4803,7 @@ static int _access_vg_lock_type(struct cmd_context *cmd, struct volume_group *vg
* no lock.
*/
if (lockd_state & LDST_FAIL) {
- if (lockd_state & LDST_EX) {
+ if ((lockd_state & LDST_EX) || cmd->lockd_vg_enforce_sh) {
log_error("Cannot access VG %s due to failed lock.", vg->name);
*failure |= FAILED_LOCK_MODE;
return 0;
diff --git a/tools/lvchange.c b/tools/lvchange.c
index 58e0fb2..823c36a 100644
--- a/tools/lvchange.c
+++ b/tools/lvchange.c
@@ -1294,8 +1294,10 @@ int lvchange(struct cmd_context *cmd, int argc, char **argv)
* are cases where lvchange does not modify the vg, so they can use
* the sh lock mode.
*/
- if (arg_count(cmd, activate_ARG) || arg_count(cmd, refresh_ARG))
+ if (arg_count(cmd, activate_ARG) || arg_count(cmd, refresh_ARG)) {
cmd->lockd_vg_default_sh = 1;
+ cmd->lockd_vg_enforce_sh = 1;
+ }
return process_each_lv(cmd, argc, argv,
update ? READ_FOR_UPDATE : 0, NULL,
diff --git a/tools/vgchange.c b/tools/vgchange.c
index 9612222..824fd91 100644
--- a/tools/vgchange.c
+++ b/tools/vgchange.c
@@ -1017,8 +1017,10 @@ static int _lockd_vgchange(struct cmd_context *cmd, int argc, char **argv)
return 0;
}
- if (arg_is_set(cmd, activate_ARG) || arg_is_set(cmd, refresh_ARG))
+ if (arg_is_set(cmd, activate_ARG) || arg_is_set(cmd, refresh_ARG)) {
cmd->lockd_vg_default_sh = 1;
+ cmd->lockd_vg_enforce_sh = 1;
+ }
/* Starting a vg lockspace means there are no locks available yet. */
Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=b93b85378d119e83…
Commit: b93b85378d119e8396b0469574770cd097a988f0
Parent: e15db1592642d5eb0a2a76b35882502c541302c7
Author: Alasdair G Kergon <agk(a)redhat.com>
AuthorDate: Wed Jul 15 23:12:54 2015 +0100
Committer: Alasdair G Kergon <agk(a)redhat.com>
CommitterDate: Wed Jul 15 23:12:54 2015 +0100
alloc: Fix lvextend failure when varying stripes.
A segfault was reported when extending an LV with a smaller number of
stripes than originally used. Under unusual circumstances, the cling
detection code could successfully find a match against the excess
stripe positions and think it had finished prematurely leading to an
allocation being pursued with a length of zero.
Rename ix_offset to num_positional_areas and move it to struct
alloc_state so that _is_condition() can obtain access to it.
In _is_condition(), areas_size can no longer be assumed to match the
number of positional slots being filled so check this newly-exposed
num_positional_areas directly instead. If the slot is outside the
range we are trying to fill, just ignore the match for now.
(Also note that the code still only performs cling detection against
the first segment of the LV.)
---
WHATS_NEW | 3 ++
lib/metadata/lv_manip.c | 72 ++++++++++++++++++++++++++---------------------
2 files changed, 43 insertions(+), 32 deletions(-)
diff --git a/WHATS_NEW b/WHATS_NEW
index 43fa004..596a92f 100644
--- a/WHATS_NEW
+++ b/WHATS_NEW
@@ -1,10 +1,13 @@
Version 2.02.126 -
================================
+ Fix alloc segfault when extending LV with fewer stripes than in first seg.
Fix handling of cache policy name.
Set cache policy before with the first lvm2 cache pool metadata commit.
Fix detection of thin-pool overprovisioning (2.02.124).
Fix lvmpolld segfaults on 32 bit architectures.
+ Add lvmlockd lock_args validation to vg_validate.
Fix ignored --startstopservices option if running lvmconf with systemd.
+ Hide sanlock LVs when processing LVs in VG unless named or --all used.
Version 2.02.125 - 7th July 2015
================================
diff --git a/lib/metadata/lv_manip.c b/lib/metadata/lv_manip.c
index 19d0ca7..86411d5 100644
--- a/lib/metadata/lv_manip.c
+++ b/lib/metadata/lv_manip.c
@@ -74,6 +74,7 @@ struct alloc_state {
uint32_t areas_size;
uint32_t log_area_count_still_needed; /* Number of areas still needing to be allocated for the log */
uint32_t allocated; /* Total number of extents allocated so far */
+ uint32_t num_positional_areas; /* Number of parallel allocations that must be contiguous/cling */
};
struct lv_names {
@@ -2242,6 +2243,10 @@ static int _is_condition(struct cmd_context *cmd __attribute__((unused)),
if (!pvmatch->condition(pvmatch, pvseg, pvmatch->pva))
return 1; /* Continue */
+ if (positional && (s >= pvmatch->alloc_state->num_positional_areas))
+ return 1;
+
+ /* FIXME The previous test should make this one redundant. */
if (positional && (s >= pvmatch->alloc_state->areas_size))
return 1;
@@ -2479,6 +2484,8 @@ static void _clear_areas(struct alloc_state *alloc_state)
{
uint32_t s;
+ alloc_state->num_positional_areas = 0;
+
for (s = 0; s < alloc_state->areas_size; s++)
alloc_state->areas[s].pva = NULL;
}
@@ -2521,9 +2528,10 @@ static void _report_needed_allocation_space(struct alloc_handle *ah,
metadata_count = alloc_state->log_area_count_still_needed;
}
- log_debug_alloc("Still need %s%" PRIu32 " total extents from %" PRIu32 " remaining:",
+ log_debug_alloc("Still need %s%" PRIu32 " total extents from %" PRIu32 " remaining (%" PRIu32 " positional slots):",
ah->approx_alloc ? "up to " : "",
- parallel_area_size * parallel_areas_count + metadata_size * metadata_count, pv_maps_size(pvms));
+ parallel_area_size * parallel_areas_count + metadata_size * metadata_count, pv_maps_size(pvms),
+ alloc_state->num_positional_areas);
log_debug_alloc(" %" PRIu32 " (%" PRIu32 " data/%" PRIu32
" parity) parallel areas of %" PRIu32 " extents each",
parallel_areas_count, ah->area_count, ah->parity_count, parallel_area_size);
@@ -2578,7 +2586,6 @@ static int _find_some_parallel_space(struct alloc_handle *ah,
struct pv_area *pva;
unsigned preferred_count = 0;
unsigned already_found_one;
- unsigned ix_offset = 0; /* Offset for non-preferred allocations */
unsigned ix_log_offset; /* Offset to start of areas to use for log */
unsigned too_small_for_log_count; /* How many too small for log? */
unsigned iteration_count = 0; /* cling_to_alloced may need 2 iterations */
@@ -2588,27 +2595,28 @@ static int _find_some_parallel_space(struct alloc_handle *ah,
uint32_t devices_needed = ah->area_count + ah->parity_count;
uint32_t required;
- /* ix_offset holds the number of parallel allocations that must be contiguous/cling */
+ _clear_areas(alloc_state);
+ _reset_unreserved(pvms);
+
+ /* num_positional_areas holds the number of parallel allocations that must be contiguous/cling */
+ /* These appear first in the array, so it is also the offset to the non-preferred allocations */
/* At most one of A_CONTIGUOUS_TO_LVSEG, A_CLING_TO_LVSEG or A_CLING_TO_ALLOCED may be set */
if (!(alloc_parms->flags & A_POSITIONAL_FILL))
- ix_offset = 0;
+ alloc_state->num_positional_areas = 0;
else if (alloc_parms->flags & (A_CONTIGUOUS_TO_LVSEG | A_CLING_TO_LVSEG))
- ix_offset = _stripes_per_mimage(alloc_parms->prev_lvseg) * alloc_parms->prev_lvseg->area_count;
+ alloc_state->num_positional_areas = _stripes_per_mimage(alloc_parms->prev_lvseg) * alloc_parms->prev_lvseg->area_count;
else if (alloc_parms->flags & A_CLING_TO_ALLOCED)
- ix_offset = ah->area_count;
+ alloc_state->num_positional_areas = ah->area_count;
if (alloc_parms->alloc == ALLOC_NORMAL || (alloc_parms->flags & A_CLING_TO_ALLOCED))
log_debug_alloc("Cling_to_allocated is %sset",
alloc_parms->flags & A_CLING_TO_ALLOCED ? "" : "not ");
if (alloc_parms->flags & A_POSITIONAL_FILL)
- log_debug_alloc("%u preferred area(s) to be filled positionally.", ix_offset);
+ log_debug_alloc("%u preferred area(s) to be filled positionally.", alloc_state->num_positional_areas);
else
log_debug_alloc("Areas to be sorted and filled sequentially.");
- _clear_areas(alloc_state);
- _reset_unreserved(pvms);
-
_report_needed_allocation_space(ah, alloc_state, pvms);
/* ix holds the number of areas found on other PVs */
@@ -2616,7 +2624,7 @@ static int _find_some_parallel_space(struct alloc_handle *ah,
if (log_iteration_count) {
log_debug_alloc("Found %u areas for %" PRIu32 " parallel areas and %" PRIu32 " log areas so far.", ix, devices_needed, alloc_state->log_area_count_still_needed);
} else if (iteration_count)
- log_debug_alloc("Filled %u out of %u preferred areas so far.", preferred_count, ix_offset);
+ log_debug_alloc("Filled %u out of %u preferred areas so far.", preferred_count, alloc_state->num_positional_areas);
/*
* Provide for escape from the loop if no progress is made.
@@ -2657,7 +2665,7 @@ static int _find_some_parallel_space(struct alloc_handle *ah,
* not enough for the logs.
*/
if (log_iteration_count) {
- for (s = devices_needed; s < ix + ix_offset; s++)
+ for (s = devices_needed; s < ix + alloc_state->num_positional_areas; s++)
if (alloc_state->areas[s].pva && alloc_state->areas[s].pva->map->pv == pvm->pv)
goto next_pv;
/* On a second pass, avoid PVs already used in an uncommitted area */
@@ -2705,8 +2713,8 @@ static int _find_some_parallel_space(struct alloc_handle *ah,
}
/* Reserve required amount of pva */
- required = _calc_required_extents(ah, pva, ix + ix_offset - 1, max_to_allocate, alloc_parms->alloc);
- if (!_reserve_required_area(ah, alloc_state, pva, required, ix + ix_offset - 1, pva->unreserved))
+ required = _calc_required_extents(ah, pva, ix + alloc_state->num_positional_areas - 1, max_to_allocate, alloc_parms->alloc);
+ if (!_reserve_required_area(ah, alloc_state, pva, required, ix + alloc_state->num_positional_areas - 1, pva->unreserved))
return_0;
}
@@ -2717,23 +2725,23 @@ static int _find_some_parallel_space(struct alloc_handle *ah,
/* With cling and contiguous we stop if we found a match for *all* the areas */
/* FIXME Rename these variables! */
if ((alloc_parms->alloc == ALLOC_ANYWHERE &&
- ix + ix_offset >= devices_needed + alloc_state->log_area_count_still_needed) ||
- (preferred_count == ix_offset &&
- (ix_offset == devices_needed + alloc_state->log_area_count_still_needed)))
+ ix + alloc_state->num_positional_areas >= devices_needed + alloc_state->log_area_count_still_needed) ||
+ (preferred_count == alloc_state->num_positional_areas &&
+ (alloc_state->num_positional_areas == devices_needed + alloc_state->log_area_count_still_needed)))
break;
}
} while ((alloc_parms->alloc == ALLOC_ANYWHERE && last_ix != ix && ix < devices_needed + alloc_state->log_area_count_still_needed) ||
/* With cling_to_alloced and normal, if there were gaps in the preferred areas, have a second iteration */
(alloc_parms->alloc == ALLOC_NORMAL && preferred_count &&
- (preferred_count < ix_offset || alloc_state->log_area_count_still_needed) &&
+ (preferred_count < alloc_state->num_positional_areas || alloc_state->log_area_count_still_needed) &&
(alloc_parms->flags & A_CLING_TO_ALLOCED) && !iteration_count++) ||
/* Extra iteration needed to fill log areas on PVs already used? */
- (alloc_parms->alloc == ALLOC_NORMAL && preferred_count == ix_offset && !ah->mirror_logs_separate &&
+ (alloc_parms->alloc == ALLOC_NORMAL && preferred_count == alloc_state->num_positional_areas && !ah->mirror_logs_separate &&
(ix + preferred_count >= devices_needed) &&
(ix + preferred_count < devices_needed + alloc_state->log_area_count_still_needed) && !log_iteration_count++));
/* Non-zero ix means at least one USE_AREA was returned */
- if (preferred_count < ix_offset && !(alloc_parms->flags & A_CLING_TO_ALLOCED) && !ix)
+ if (preferred_count < alloc_state->num_positional_areas && !(alloc_parms->flags & A_CLING_TO_ALLOCED) && !ix)
return 1;
if (ix + preferred_count < devices_needed + alloc_state->log_area_count_still_needed)
@@ -2748,17 +2756,17 @@ static int _find_some_parallel_space(struct alloc_handle *ah,
}
} else if (ix > 1) {
log_debug_alloc("Sorting %u areas", ix);
- qsort(alloc_state->areas + ix_offset, ix, sizeof(*alloc_state->areas),
+ qsort(alloc_state->areas + alloc_state->num_positional_areas, ix, sizeof(*alloc_state->areas),
_comp_area);
}
/* If there are gaps in our preferred areas, fill them from the sorted part of the array */
- if (preferred_count && preferred_count != ix_offset) {
+ if (preferred_count && preferred_count != alloc_state->num_positional_areas) {
for (s = 0; s < devices_needed; s++)
if (!alloc_state->areas[s].pva) {
- alloc_state->areas[s].pva = alloc_state->areas[ix_offset].pva;
- alloc_state->areas[s].used = alloc_state->areas[ix_offset].used;
- alloc_state->areas[ix_offset++].pva = NULL;
+ alloc_state->areas[s].pva = alloc_state->areas[alloc_state->num_positional_areas].pva;
+ alloc_state->areas[s].used = alloc_state->areas[alloc_state->num_positional_areas].used;
+ alloc_state->areas[alloc_state->num_positional_areas++].pva = NULL;
}
}
@@ -2772,14 +2780,14 @@ static int _find_some_parallel_space(struct alloc_handle *ah,
/* FIXME This logic is due to its heritage and can be simplified! */
if (alloc_state->log_area_count_still_needed) {
/* How many areas are too small for the log? */
- while (too_small_for_log_count < ix_offset + ix &&
- (*(alloc_state->areas + ix_offset + ix - 1 -
+ while (too_small_for_log_count < alloc_state->num_positional_areas + ix &&
+ (*(alloc_state->areas + alloc_state->num_positional_areas + ix - 1 -
too_small_for_log_count)).used < ah->log_len)
too_small_for_log_count++;
- ix_log_offset = ix_offset + ix - too_small_for_log_count - ah->log_area_count;
+ ix_log_offset = alloc_state->num_positional_areas + ix - too_small_for_log_count - ah->log_area_count;
}
- if (ix + ix_offset < devices_needed +
+ if (ix + alloc_state->num_positional_areas < devices_needed +
(alloc_state->log_area_count_still_needed ? alloc_state->log_area_count_still_needed +
too_small_for_log_count : 0))
return 1;
@@ -2793,12 +2801,12 @@ static int _find_some_parallel_space(struct alloc_handle *ah,
/*
* This code covers the initial allocation - after that there is something to 'cling' to
* and we shouldn't get this far.
- * ix_offset is assumed to be 0 with A_PARTITION_BY_TAGS.
+ * alloc_state->num_positional_areas is assumed to be 0 with A_PARTITION_BY_TAGS.
*
* FIXME Consider a second attempt with A_PARTITION_BY_TAGS if, for example, the largest area
* had all the tags set, but other areas don't.
*/
- if ((alloc_parms->flags & A_PARTITION_BY_TAGS) && !ix_offset) {
+ if ((alloc_parms->flags & A_PARTITION_BY_TAGS) && !alloc_state->num_positional_areas) {
if (!_limit_to_one_area_per_tag(ah, alloc_state, ix_log_offset, &ix))
return_0;