master - cache: Add functions that create/remove cache LVs
by Jonathan Brassow
Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=b94a3ee9f644e6...
Commit: b94a3ee9f644e6be6f2f70dacefef0f606e2eaa5
Parent: ef6c5795a0b0502b5ef984233fbeb73a9d81f2d0
Author: Jonathan Brassow <jbrassow(a)redhat.com>
AuthorDate: Tue Feb 4 07:59:58 2014 -0600
Committer: Jonathan Brassow <jbrassow(a)redhat.com>
CommitterDate: Tue Feb 4 07:59:58 2014 -0600
cache: Add functions that create/remove cache LVs
A cache LV - from LVM's perspective - is a user-accessible device that
links the cachepool LV and the origin LV. The following functions
were added to facilitate the creation and removal of this top-level
LV:
1) 'lv_cache_create' - takes a cachepool and an origin device and links
them into a new top-level LV of 'cache' segment type. No allocation
is necessary in this function, as the sub-LVs contain all of the
necessary allocated space. Only the top-level layer needs to be
created.
2) 'lv_cache_remove' - this function removes the top-level LV of a
cache LV - promoting the cachepool and origin sub-LVs to top-level
devices and leaving them exposed to the user. That is, the
cachepool is unlinked and free to be used with another origin to
form a new cache LV; and the origin is no longer cached.
(Currently, if the cache needs to be flushed, it is done in this
function and the function waits for it to complete before proceeding.
This will be taken out in a future patch in favor of polling.)
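As a rough illustration of how a command-level caller might drive these two new entry points, consider the hypothetical sketch below (not part of this patch; the wrapper names are invented, and VG locking, vg_write/vg_commit and detailed error reporting are deliberately omitted):

/* Hypothetical caller sketch - not part of this patch. */
static struct logical_volume *_cache_origin(struct logical_volume *origin,
                                            struct logical_volume *pool)
{
        /* Link pool and origin under a new top-level LV of segment type
         * "cache"; no new extents are allocated by lv_cache_create(). */
        if (!lv_is_cache_pool(pool)) {
                log_error("%s is not a cache pool.", pool->name);
                return NULL;
        }

        return lv_cache_create(pool, origin);
}

static int _uncache(struct logical_volume *cache_lv)
{
        /* Flush the cache (synchronously for now), unlink the cache pool
         * and promote the origin back to a visible LV carrying the name
         * of the former cache LV. */
        return lv_cache_remove(cache_lv);
}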
---
lib/Makefile.in | 1 +
lib/metadata/cache_manip.c | 219 ++++++++++++++++++++++++++++++++++++++
lib/metadata/metadata-exported.h | 9 ++-
3 files changed, 227 insertions(+), 2 deletions(-)
diff --git a/lib/Makefile.in b/lib/Makefile.in
index 3808269..968ad00 100644
--- a/lib/Makefile.in
+++ b/lib/Makefile.in
@@ -87,6 +87,7 @@ SOURCES =\
locking/locking.c \
locking/no_locking.c \
log/log.c \
+ metadata/cache_manip.c \
metadata/lv.c \
metadata/lv_manip.c \
metadata/merge.c \
diff --git a/lib/metadata/cache_manip.c b/lib/metadata/cache_manip.c
new file mode 100644
index 0000000..2456171
--- /dev/null
+++ b/lib/metadata/cache_manip.c
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc. All rights reserved.
+ *
+ * This file is part of LVM2.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU Lesser General Public License v.2.1.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "lib.h"
+#include "metadata.h"
+#include "locking.h"
+#include "pv_map.h"
+#include "lvm-string.h"
+#include "toolcontext.h"
+#include "lv_alloc.h"
+#include "pv_alloc.h"
+#include "display.h"
+#include "segtype.h"
+#include "archiver.h"
+#include "activate.h"
+#include "str_list.h"
+#include "defaults.h"
+#include "lvm-exec.h"
+
+/*
+ * lv_cache_create
+ * @pool
+ * @origin
+ *
+ * Given a cache_pool and an origin, link the two and create a
+ * cached LV.
+ *
+ * Returns: cache LV on success, NULL on failure
+ */
+struct logical_volume *lv_cache_create(struct logical_volume *pool,
+ struct logical_volume *origin)
+{
+ const struct segment_type *segtype;
+ struct cmd_context *cmd = pool->vg->cmd;
+ struct logical_volume *cache_lv;
+ struct lv_segment *seg;
+
+ if (!lv_is_cache_pool(pool)) {
+ log_error(INTERNAL_ERROR
+ "%s is not a cache_pool LV", pool->name);
+ return NULL;
+ }
+
+ if (lv_is_cache_type(origin)) {
+ /*
+ * FIXME: We can layer caches, insert_layer_for_lv() would
+ * have to do a better job renaming the LVs in the stack
+ * first so that there isn't a name collision with <name>_corig.
+ * The origin under the origin would become *_corig_corig
+ * before renaming the origin above to *_corig.
+ */
+ log_error(INTERNAL_ERROR
+ "The origin, %s, cannot be of cache type",
+ origin->name);
+ return NULL;
+ }
+
+ if (!(segtype = get_segtype_from_string(cmd, "cache")))
+ return_NULL;
+
+ cache_lv = origin;
+ if (!(origin = insert_layer_for_lv(cmd, cache_lv, CACHE, "_corig")))
+ return_NULL;
+
+ seg = first_seg(cache_lv);
+ seg->segtype = segtype;
+
+ if (!attach_pool_lv(seg, pool, NULL, NULL))
+ return_0;
+
+ return cache_lv;
+}
+
+/*
+ * lv_cache_remove
+ * @cache_lv
+ *
+ * Given a cache LV, remove the cache layer. This will unlink
+ * the origin and cache_pool, remove the cache LV layer, and promote
+ * the origin to a usable non-cached LV of the same name as the
+ * given cache_lv.
+ *
+ * Returns: 1 on success, 0 on failure
+ */
+int lv_cache_remove(struct logical_volume *cache_lv)
+{
+ struct cmd_context *cmd = cache_lv->vg->cmd;
+ char *policy_name;
+ uint64_t dirty_blocks;
+ struct segment_type *segtype;
+ struct lv_segment *cache_seg = first_seg(cache_lv);
+ struct logical_volume *origin_lv;
+ struct logical_volume *cache_pool_lv;
+
+ if (!lv_is_cache(cache_lv))
+ return_0;
+
+ /*
+ * FIXME:
+ * Before the link can be broken, we must ensure that the
+ * cache has been flushed. This may already be the case
+ * if the cache mode is writethrough (or the cleaner
+ * policy is in place from a previous half-finished attempt
+ * to remove the cache_pool). It could take a long time to
+ * flush the cache - it should probably be done in the background.
+ *
+ * Also, if we do perform the flush in the background and we
+ * happen to also be removing the cache/origin LV, then we
+ * could check if the cleaner policy is in place and simply
+ * remove the cache_pool then without waiting for the flush to
+ * complete.
+ */
+ if (!lv_cache_policy_info(cache_lv, &policy_name, NULL, NULL))
+ return_0;
+
+ if (strcmp(policy_name, "cleaner")) {
+ /* We must swap in the cleaner to flush the cache */
+ log_error("Flushing cache for %s", cache_lv->name);
+
+ /*
+ * Is there a clean way to free the memory for the name
+ * and argv when changing the policy?
+ */
+ cache_seg->policy_name = (char *)"cleaner";
+ cache_seg->policy_argc = 0;
+ cache_seg->policy_argv = NULL;
+
+ /* update the kernel to put the cleaner policy in place */
+ if (!vg_write(cache_lv->vg))
+ return_0;
+ if (!suspend_lv(cmd, cache_lv))
+ return_0;
+ if (!vg_commit(cache_lv->vg))
+ return_0;
+ if (!resume_lv(cmd, cache_lv))
+ return_0;
+ }
+
+ //FIXME: use polling to do this...
+ do {
+ if (!lv_cache_block_info(cache_lv, NULL,
+ &dirty_blocks, NULL, NULL))
+ return_0;
+ log_error("%" PRIu64 " blocks must still be flushed.",
+ dirty_blocks);
+ if (dirty_blocks)
+ sleep(5);
+ } while (dirty_blocks);
+
+ cache_pool_lv = first_seg(cache_lv)->pool_lv;
+ if (!detach_pool_lv(first_seg(cache_lv)))
+ return_0;
+
+ origin_lv = seg_lv(first_seg(cache_lv), 0);
+ lv_set_visible(origin_lv);
+
+//FIXME: We should be able to use 'remove_layer_from_lv', but
+// there is a call to 'lv_empty' in there that recursively
+// deletes everything down the tree - including the origin_lv
+// that we are trying to preserve!
+// if (!remove_layer_from_lv(cache_lv, origin_lv))
+// return_0;
+
+ if (!remove_seg_from_segs_using_this_lv(origin_lv, first_seg(cache_lv)))
+ return_0;
+ if (!move_lv_segments(cache_lv, origin_lv, 0, 0))
+ return_0;
+
+ cache_lv->status &= ~CACHE;
+
+ segtype = get_segtype_from_string(cmd, "error");
+ if (!lv_add_virtual_segment(origin_lv, 0,
+ cache_lv->le_count, segtype, NULL))
+ return_0;
+
+ if (!vg_write(cache_lv->vg))
+ return_0;
+
+ /*
+ * suspend_lv on this cache LV will suspend all of the components:
+ * - the top-level cache LV
+ * - the origin
+ * - the cache_pool and all of its sub-LVs
+ */
+ if (!suspend_lv(cmd, cache_lv))
+ return_0;
+
+ if (!vg_commit(cache_lv->vg))
+ return_0;
+
+ /*
+ * resume_lv on this (former) cache LV will resume all
+ * but the cache_pool LV. It must be resumed separately.
+ */
+ if (!resume_lv(cmd, cache_lv))
+ return_0;
+ if (!resume_lv(cmd, cache_pool_lv))
+ return_0;
+
+ if (!activate_lv(cmd, origin_lv))
+ return_0;
+ if (!deactivate_lv(cmd, origin_lv))
+ return_0;
+ if (!lv_remove(origin_lv))
+ return_0;
+
+ return 1;
+}
diff --git a/lib/metadata/metadata-exported.h b/lib/metadata/metadata-exported.h
index 97f05fc..d8eef2d 100644
--- a/lib/metadata/metadata-exported.h
+++ b/lib/metadata/metadata-exported.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
- * Copyright (C) 2004-2013 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004-2014 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -1016,9 +1016,14 @@ int lv_raid_reshape(struct logical_volume *lv,
int lv_raid_replace(struct logical_volume *lv, struct dm_list *remove_pvs,
struct dm_list *allocate_pvs);
int lv_raid_remove_missing(struct logical_volume *lv);
-
/* -- metadata/raid_manip.c */
+/* ++ metadata/cache_manip.c */
+struct logical_volume *lv_cache_create(struct logical_volume *pool,
+ struct logical_volume *origin);
+int lv_cache_remove(struct logical_volume *cache_lv);
+/* -- metadata/cache_manip.c */
+
struct cmd_vg *cmd_vg_add(struct dm_pool *mem, struct dm_list *cmd_vgs,
const char *vg_name, const char *vgid,
uint32_t flags);
master - raid: add temporary activation for raid metadata clear
by Zdenek Kabelac
Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=ef6c5795a0b050...
Commit: ef6c5795a0b0502b5ef984233fbeb73a9d81f2d0
Parent: ef557b8091ff67f38424d3f8413f3a543275b01d
Author: Zdenek Kabelac <zkabelac(a)redhat.com>
AuthorDate: Tue Feb 4 14:47:52 2014 +0100
Committer: Zdenek Kabelac <zkabelac(a)redhat.com>
CommitterDate: Tue Feb 4 14:51:05 2014 +0100
raid: add temporary activation for raid metadata clear
Use LV_TEMPORARY when activating devices for clearing
raid metadata.
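In outline, the change brackets the local activation used for clearing with the LV_TEMPORARY flag so the device is treated as transient while it exists (a minimal sketch of the pattern; the real change is the _clear_lv() hunk in the diff below, and the wipe/deactivate steps noted in the trailing comment are pre-existing code):

/* Sketch of the pattern introduced in _clear_lv(). */
lv->status |= LV_TEMPORARY;             /* mark the device as transient */
if (!was_active && !activate_lv_local(lv->vg->cmd, lv)) {
        log_error("Failed to activate %s locally for clearing.", lv->name);
        return 0;
}
lv->status &= ~LV_TEMPORARY;            /* activated; drop the transient marker */
/* ... existing code then wipes the metadata area and deactivates the LV ... */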
---
WHATS_NEW | 1 +
lib/metadata/raid_manip.c | 6 ++++--
2 files changed, 5 insertions(+), 2 deletions(-)
diff --git a/WHATS_NEW b/WHATS_NEW
index 25293e6..dbacf01 100644
--- a/WHATS_NEW
+++ b/WHATS_NEW
@@ -1,5 +1,6 @@
Version 2.02.106 -
====================================
+ Avoid exposing temporary devices when initializing raid metadata volumes.
Add internal tags command to display any tags defined on the host.
Prohibit use of external origin with size incompatible with thin pool.
Avoid trying to convert single to thin pool and volume at the same time.
diff --git a/lib/metadata/raid_manip.c b/lib/metadata/raid_manip.c
index a29ea85..3f2b043 100644
--- a/lib/metadata/raid_manip.c
+++ b/lib/metadata/raid_manip.c
@@ -174,11 +174,13 @@ static int _clear_lv(struct logical_volume *lv)
if (test_mode())
return 1;
- if (!was_active && !activate_lv_excl_local(lv->vg->cmd, lv)) {
- log_error("Failed to activate %s for clearing",
+ lv->status |= LV_TEMPORARY;
+ if (!was_active && !activate_lv_local(lv->vg->cmd, lv)) {
+ log_error("Failed to activate localy %s for clearing",
lv->name);
return 0;
}
+ lv->status &= ~LV_TEMPORARY;
log_verbose("Clearing metadata area of %s/%s",
lv->vg->name, lv->name);
master - tests: update test
by Zdenek Kabelac
Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=ef557b8091ff67...
Commit: ef557b8091ff67f38424d3f8413f3a543275b01d
Parent: 324781953178ed9787257334a73d93e731add0c4
Author: Zdenek Kabelac <zkabelac(a)redhat.com>
AuthorDate: Mon Feb 3 18:26:55 2014 +0100
Committer: Zdenek Kabelac <zkabelac(a)redhat.com>
CommitterDate: Tue Feb 4 14:49:38 2014 +0100
tests: update test
Remove some unneeded traces and outputs.
---
test/lib/aux.sh | 3 ++-
test/shell/lvconvert-repair-dmeventd.sh | 2 +-
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/test/lib/aux.sh b/test/lib/aux.sh
index 2a882a2..3c02d40 100644
--- a/test/lib/aux.sh
+++ b/test/lib/aux.sh
@@ -52,6 +52,7 @@ prepare_clvmd() {
prepare_dmeventd() {
if pgrep dmeventd ; then
+ rm -f debug.log
echo "Cannot test dmeventd with real dmeventd ($(pgrep dmeventd)) running."
skip
fi
@@ -654,7 +655,7 @@ skip_if_raid456_replace_broken() {
udev_wait() {
pgrep udev >/dev/null || return 0
- which udevadm >/dev/null || return 0
+ which udevadm &>/dev/null || return 0
if test -n "$1" ; then
udevadm settle --exit-if-exists="$1" || true
else
diff --git a/test/shell/lvconvert-repair-dmeventd.sh b/test/shell/lvconvert-repair-dmeventd.sh
index 55eee37..1604ecb 100644
--- a/test/shell/lvconvert-repair-dmeventd.sh
+++ b/test/shell/lvconvert-repair-dmeventd.sh
@@ -14,8 +14,8 @@
which mkfs.ext2 || skip
aux skip_if_mirror_recovery_broken
-aux prepare_vg 5
aux prepare_dmeventd
+aux prepare_vg 5
lvcreate -aey --type mirror -m 3 --ignoremonitoring -L 1 -n 4way $vg
lvchange --monitor y $vg/4way
master - pool: Make another thin pool fn generic for cache usage also
by Jonathan Brassow
Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=324781953178ed...
Commit: 324781953178ed9787257334a73d93e731add0c4
Parent: 131383963ff7cc9b101b9a80a8e495473ccec4cf
Author: Jonathan Brassow <jbrassow(a)redhat.com>
AuthorDate: Tue Feb 4 07:03:52 2014 -0600
Committer: Jonathan Brassow <jbrassow(a)redhat.com>
CommitterDate: Tue Feb 4 07:03:52 2014 -0600
pool: Make another thin pool fn generic for cache usage also
Make '_recalculate_thin_pool_chunk_size_with_dev_hints' generic so it
can be used for both cache and thin pools.
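Condensed, the renamed helper now selects its limits and default per pool type before validating the device-topology hint (a sketch distilled from the diff below; the thin-pool performance-policy branch and the lvm.conf overrides are omitted for brevity):

/* Per-pool-type limits chosen by the generalized helper (sketch). */
if (seg_is_thin_pool(lp)) {
        min_chunk = DM_THIN_MIN_DATA_BLOCK_SIZE;
        max_chunk = DM_THIN_MAX_DATA_BLOCK_SIZE;
        default_chunk_size = DEFAULT_THIN_POOL_CHUNK_SIZE * 2;   /* KiB -> 512B sectors */
} else if (seg_is_cache_pool(lp)) {
        min_chunk = DM_CACHE_MIN_DATA_BLOCK_SIZE;
        max_chunk = DM_CACHE_MAX_DATA_BLOCK_SIZE;
        default_chunk_size = DEFAULT_CACHE_POOL_CHUNK_SIZE * 2;  /* 64 KiB -> 128 sectors */
}

/* A topology hint outside [min_chunk, max_chunk] is ignored; otherwise
 * the larger of the hint and the default wins. */
if (hint >= min_chunk && hint <= max_chunk)
        chunk_size = (hint >= default_chunk_size) ? hint : default_chunk_size;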
---
conf/example.conf.in | 11 +++++++++
lib/config/config_settings.h | 3 ++
lib/config/defaults.h | 1 +
lib/metadata/lv_manip.c | 52 +++++++++++++++++++++++++++++-------------
libdm/libdevmapper.h | 16 +++++++++++++
5 files changed, 67 insertions(+), 16 deletions(-)
diff --git a/conf/example.conf.in b/conf/example.conf.in
index 7b98b4b..08ce877 100644
--- a/conf/example.conf.in
+++ b/conf/example.conf.in
@@ -305,6 +305,17 @@ allocation {
# placed on different PVs from the cache_pool data.
cache_pool_metadata_require_separate_pvs = 0
+ # Specify the minimal chunk size (in kiB) for cache pool volumes.
+ # Using a chunk_size that is too large can result in wasteful use of
+ # the cache, where small reads and writes can cause large sections of
+ # an LV to be mapped into the cache. However, choosing a chunk_size
+ # that is too small can result in more overhead trying to manage the
+ # numerous chunks that become mapped into the cache. The former is
+ # more of a problem than the latter in most cases, so we default to
+ # a value that is on the smaller end of the spectrum. Supported values
+ # range from 32(kiB) to 1048576 in multiples of 32.
+ # cache_pool_chunk_size = 64
+
# Set to 1 to guarantee that thin pool metadata will always
# be placed on different PVs from the pool data.
thin_pool_metadata_require_separate_pvs = 0
diff --git a/lib/config/config_settings.h b/lib/config/config_settings.h
index e9ae494..ae91e2d 100644
--- a/lib/config/config_settings.h
+++ b/lib/config/config_settings.h
@@ -107,7 +107,10 @@ cfg(allocation_maximise_cling_CFG, "maximise_cling", allocation_CFG_SECTION, 0,
cfg(allocation_use_blkid_wiping_CFG, "use_blkid_wiping", allocation_CFG_SECTION, 0, CFG_TYPE_BOOL, 1, vsn(2, 2, 105), NULL)
cfg(allocation_wipe_signatures_when_zeroing_new_lvs_CFG, "wipe_signatures_when_zeroing_new_lvs", allocation_CFG_SECTION, 0, CFG_TYPE_BOOL, 1, vsn(2, 2, 105), NULL)
cfg(allocation_mirror_logs_require_separate_pvs_CFG, "mirror_logs_require_separate_pvs", allocation_CFG_SECTION, 0, CFG_TYPE_BOOL, DEFAULT_MIRROR_LOGS_REQUIRE_SEPARATE_PVS, vsn(2, 2, 85), NULL)
+
cfg(allocation_cache_pool_metadata_require_separate_pvs_CFG, "cache_pool_metadata_require_separate_pvs", allocation_CFG_SECTION, 0, CFG_TYPE_BOOL, DEFAULT_CACHE_POOL_METADATA_REQUIRE_SEPARATE_PVS, vsn(2, 2, 106), NULL)
+cfg(allocation_cache_pool_chunk_size_CFG, "cache_pool_chunk_size", allocation_CFG_SECTION, 0, CFG_TYPE_INT, 0, vsn(2, 2, 106), NULL)
+
cfg(allocation_thin_pool_metadata_require_separate_pvs_CFG, "thin_pool_metadata_require_separate_pvs", allocation_CFG_SECTION, 0, CFG_TYPE_BOOL, DEFAULT_THIN_POOL_METADATA_REQUIRE_SEPARATE_PVS, vsn(2, 2, 89), NULL)
cfg(allocation_thin_pool_zero_CFG, "thin_pool_zero", allocation_CFG_SECTION, CFG_PROFILABLE, CFG_TYPE_BOOL, DEFAULT_THIN_POOL_ZERO, vsn(2, 2, 99), NULL)
cfg(allocation_thin_pool_discards_CFG, "thin_pool_discards", allocation_CFG_SECTION, CFG_PROFILABLE, CFG_TYPE_STRING, DEFAULT_THIN_POOL_DISCARDS, vsn(2, 2, 99), NULL)
diff --git a/lib/config/defaults.h b/lib/config/defaults.h
index a388e13..4f53b3f 100644
--- a/lib/config/defaults.h
+++ b/lib/config/defaults.h
@@ -80,6 +80,7 @@
#define DEFAULT_POOL_METADATA_SPARE 1
#define DEFAULT_CACHE_POOL_METADATA_REQUIRE_SEPARATE_PVS 0
+#define DEFAULT_CACHE_POOL_CHUNK_SIZE 64 /* KB */
#define DEFAULT_UMASK 0077
diff --git a/lib/metadata/lv_manip.c b/lib/metadata/lv_manip.c
index bd1e57c..e95b7e5 100644
--- a/lib/metadata/lv_manip.c
+++ b/lib/metadata/lv_manip.c
@@ -5628,8 +5628,8 @@ static unsigned long _lcm(unsigned long n1, unsigned long n2)
return (n1 * n2) / _gcd(n1, n2);
}
-static int _recalculate_thin_pool_chunk_size_with_dev_hints(struct lvcreate_params *lp,
- struct logical_volume *pool_lv)
+static int _recalculate_pool_chunk_size_with_dev_hints(struct lvcreate_params *lp,
+ struct logical_volume *pool_lv)
{
struct logical_volume *pool_data_lv;
struct lv_segment *seg;
@@ -5637,13 +5637,34 @@ static int _recalculate_thin_pool_chunk_size_with_dev_hints(struct lvcreate_para
struct cmd_context *cmd = pool_lv->vg->cmd;
unsigned long previous_hint = 0, hint = 0;
uint32_t chunk_size = lp->chunk_size;
- uint32_t default_chunk_size = lp->thin_chunk_size_calc_policy == THIN_CHUNK_SIZE_CALC_METHOD_PERFORMANCE ?
- DEFAULT_THIN_POOL_CHUNK_SIZE_PERFORMANCE*2 : DEFAULT_THIN_POOL_CHUNK_SIZE*2;
+ uint32_t default_chunk_size;
+ uint32_t min_chunk, max_chunk;
- if (lp->passed_args & PASS_ARG_CHUNK_SIZE ||
- find_config_tree_int(cmd, allocation_thin_pool_chunk_size_CFG, NULL))
+ if (lp->passed_args & PASS_ARG_CHUNK_SIZE)
goto out;
+ if (seg_is_thin_pool(lp)) {
+ if (find_config_tree_int(cmd, allocation_thin_pool_chunk_size_CFG, NULL))
+ goto out;
+
+ min_chunk = DM_THIN_MIN_DATA_BLOCK_SIZE;
+ max_chunk = DM_THIN_MAX_DATA_BLOCK_SIZE;
+ if (lp->thin_chunk_size_calc_policy == THIN_CHUNK_SIZE_CALC_METHOD_PERFORMANCE)
+ default_chunk_size = DEFAULT_THIN_POOL_CHUNK_SIZE_PERFORMANCE*2;
+ else
+ default_chunk_size = DEFAULT_THIN_POOL_CHUNK_SIZE*2;
+ } else if (seg_is_cache_pool(lp)) {
+ if (find_config_tree_int(cmd, allocation_cache_pool_chunk_size_CFG, NULL))
+ goto out;
+ min_chunk = DM_CACHE_MIN_DATA_BLOCK_SIZE;
+ max_chunk = DM_CACHE_MAX_DATA_BLOCK_SIZE;
+ default_chunk_size = DEFAULT_CACHE_POOL_CHUNK_SIZE*2;
+ } else {
+ log_error(INTERNAL_ERROR "%s is not a thin pool or cache pool",
+ pool_lv->name);
+ return 0;
+ }
+
pool_data_lv = seg_lv(first_seg(pool_lv), 0);
dm_list_iterate_items(seg, &pool_data_lv->segments) {
@@ -5661,19 +5682,18 @@ static int _recalculate_thin_pool_chunk_size_with_dev_hints(struct lvcreate_para
}
if (!hint) {
- log_debug_alloc("No usable device hint found while recalculating "
- "thin pool chunk size for %s.", pool_lv->name);
+ log_debug_alloc("No usable device hint found while recalculating"
+ " thin pool chunk size for %s.", pool_lv->name);
goto out;
}
- if (hint < DM_THIN_MIN_DATA_BLOCK_SIZE ||
- hint > DM_THIN_MAX_DATA_BLOCK_SIZE) {
- log_debug_alloc("Calculated chunk size value of %ld sectors "
- "for thin pool %s is out of allowed range (%d-%d).",
- hint, pool_lv->name, DM_THIN_MIN_DATA_BLOCK_SIZE,
- DM_THIN_MAX_DATA_BLOCK_SIZE);
+ if ((hint < min_chunk) || (hint > max_chunk)) {
+ log_debug_alloc("Calculated chunk size value of %ld sectors for"
+ " thin pool %s is out of allowed range (%d-%d).",
+ hint, pool_lv->name, min_chunk, max_chunk);
} else
- chunk_size = hint >= default_chunk_size ? hint : default_chunk_size;
+ chunk_size = (hint >= default_chunk_size) ?
+ hint : default_chunk_size;
out:
first_seg(pool_lv)->chunk_size = chunk_size;
return 1;
@@ -5989,7 +6009,7 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
return_NULL;
if (seg_is_thin_pool(lp)) {
- if (!_recalculate_thin_pool_chunk_size_with_dev_hints(lp, lv))
+ if (!_recalculate_pool_chunk_size_with_dev_hints(lp, lv))
return_NULL;
first_seg(lv)->zero_new_blocks = lp->zero ? 1 : 0;
first_seg(lv)->discards = lp->discards;
diff --git a/libdm/libdevmapper.h b/libdm/libdevmapper.h
index 131bd3f..b50501e 100644
--- a/libdm/libdevmapper.h
+++ b/libdm/libdevmapper.h
@@ -715,6 +715,22 @@ int dm_tree_node_add_raid_target(struct dm_tree_node *node,
uint64_t rebuilds,
uint64_t flags);
+/*
+ * Defines below are based on the kernel's dm-cache.c defines
+ * DM_CACHE_MIN_DATA_BLOCK_SIZE (32 * 1024 >> SECTOR_SHIFT)
+ * DM_CACHE_MAX_DATA_BLOCK_SIZE (1024 * 1024 * 1024 >> SECTOR_SHIFT)
+ */
+#define DM_CACHE_MIN_DATA_BLOCK_SIZE (UINT32_C(64))
+#define DM_CACHE_MAX_DATA_BLOCK_SIZE (UINT32_C(2097152))
+/*
+ * Max supported size for cache pool metadata device.
+ * Limitation is hardcoded into the kernel and bigger device sizes
+ * are not accepted.
+ *
+ * Limit defined in drivers/md/dm-cache-metadata.h
+ */
+#define DM_CACHE_METADATA_MAX_SECTORS DM_THIN_METADATA_MAX_SECTORS
+
struct dm_tree_node_raid_params {
const char *raid_type;