master - devicemapper: retry remove even for subLVs
by Zdenek Kabelac
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=6cee8f1b063dcf5d809...
Commit: 6cee8f1b063dcf5d809e14de38ba489ce5b8f562
Parent: c1703845c3d82e381c545b8ad8bde68bafc2fbcf
Author: Zdenek Kabelac <zkabelac(a)redhat.com>
AuthorDate: Thu Nov 8 12:12:58 2018 +0100
Committer: Zdenek Kabelac <zkabelac(a)redhat.com>
CommitterDate: Thu Nov 8 12:20:57 2018 +0100
devicemapper: retry remove even for subLVs
With older systems and udevs we don't have control over scanning of lvm2
internal devices - so far we set retry-removal only for top-level LVs,
but in occasional cases udev can be 'fast enough' to open device for
scanning and prevent removal of such device from DM table.
So to combat this case - try to pass the 'retry' flag also for removal of
internal devices, to see how many races can go away with this simple
patch.
Note: patch is applied only to internal version of libdm so the external
API remains working in the old way for now.
---
device_mapper/libdm-deptree.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/device_mapper/libdm-deptree.c b/device_mapper/libdm-deptree.c
index 89a0a48..06f10ae 100644
--- a/device_mapper/libdm-deptree.c
+++ b/device_mapper/libdm-deptree.c
@@ -1804,7 +1804,7 @@ static int _dm_tree_deactivate_children(struct dm_tree_node *dnode,
if (!_deactivate_node(name, info.major, info.minor,
&child->dtree->cookie, child->udev_flags,
- (level == 0) ? child->dtree->retry_remove : 0)) {
+ child->dtree->retry_remove)) {
log_error("Unable to deactivate %s (" FMTu32 ":"
FMTu32 ").", name, info.major, info.minor);
r = 0;
5 years, 5 months
master - activation: trimming string is expected
by Zdenek Kabelac
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=c1703845c3d82e381c5...
Commit: c1703845c3d82e381c545b8ad8bde68bafc2fbcf
Parent: 1dc5603f73940bc59d09afa67c2e8c4da0ce8526
Author: Zdenek Kabelac <zkabelac(a)redhat.com>
AuthorDate: Thu Nov 8 10:02:28 2018 +0100
Committer: Zdenek Kabelac <zkabelac(a)redhat.com>
CommitterDate: Thu Nov 8 12:20:57 2018 +0100
activation: trimming string is expected
Commit 813347cf84617a946d9184f44c5fbd275bb41766 added extra validation,
however in this particular case we do want to trim the suffix out, so rather
ignore the resulting error code here intentionally.
---
lib/activate/dev_manager.c | 7 ++-----
1 files changed, 2 insertions(+), 5 deletions(-)
diff --git a/lib/activate/dev_manager.c b/lib/activate/dev_manager.c
index 49f425b..f0c5254 100644
--- a/lib/activate/dev_manager.c
+++ b/lib/activate/dev_manager.c
@@ -2061,11 +2061,8 @@ static int _check_holder(struct dev_manager *dm, struct dm_tree *dtree,
if (!strncmp(uuid, (char*)&lv->vg->id, sizeof(lv->vg->id)) &&
!dm_tree_find_node_by_uuid(dtree, uuid)) {
- if (!dm_strncpy((char*)&id, uuid, 2 * sizeof(struct id) + 1)) {
- log_error(INTERNAL_ERROR "Too long UUID %s in VG %s.",
- uuid, lv->vg->name);
- goto out;
- }
+ /* trims any UUID suffix (i.e. -cow) */
+ (void) dm_strncpy((char*)&id, uuid, 2 * sizeof(struct id) + 1);
/* If UUID is not yet in dtree, look for matching LV */
if (!(lv_det = find_lv_in_vg_by_lvid(lv->vg, &id))) {
5 years, 5 months
master - devices: reuse bcache fd when getting block size
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=1dc5603f73940bc59d0...
Commit: 1dc5603f73940bc59d09afa67c2e8c4da0ce8526
Parent: 3ae55695708b5d702f21daf776607d30cebe69c3
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Tue Nov 6 16:03:17 2018 -0600
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Tue Nov 6 16:36:18 2018 -0600
devices: reuse bcache fd when getting block size
This avoids an unnecessary open() on the device.
---
lib/device/dev-io.c | 27 +++++++++++++++++++--------
1 files changed, 19 insertions(+), 8 deletions(-)
diff --git a/lib/device/dev-io.c b/lib/device/dev-io.c
index dc95131..4460e55 100644
--- a/lib/device/dev-io.c
+++ b/lib/device/dev-io.c
@@ -148,16 +148,27 @@ static int _io(struct device_area *where, char *buffer, int should_write, dev_io
int dev_get_block_size(struct device *dev, unsigned int *physical_block_size, unsigned int *block_size)
{
const char *name = dev_name(dev);
- int needs_open;
+ int fd = dev->bcache_fd;
+ int do_close = 0;
int r = 1;
- needs_open = (!dev->open_count && (dev->phys_block_size == -1 || dev->block_size == -1));
+ if ((dev->phys_block_size > 0) && (dev->block_size > 0)) {
+ *physical_block_size = (unsigned int)dev->phys_block_size;
+ *block_size = (unsigned int)dev->block_size;
+ return 1;
+ }
- if (needs_open && !dev_open_readonly(dev))
- return_0;
+ if (fd <= 0) {
+ if (!dev->open_count) {
+ if (!dev_open_readonly(dev))
+ return_0;
+ do_close = 1;
+ }
+ fd = dev_fd(dev);
+ }
if (dev->block_size == -1) {
- if (ioctl(dev_fd(dev), BLKBSZGET, &dev->block_size) < 0) {
+ if (ioctl(fd, BLKBSZGET, &dev->block_size) < 0) {
log_sys_error("ioctl BLKBSZGET", name);
r = 0;
goto out;
@@ -168,7 +179,7 @@ int dev_get_block_size(struct device *dev, unsigned int *physical_block_size, un
#ifdef BLKPBSZGET
/* BLKPBSZGET is available in kernel >= 2.6.32 only */
if (dev->phys_block_size == -1) {
- if (ioctl(dev_fd(dev), BLKPBSZGET, &dev->phys_block_size) < 0) {
+ if (ioctl(fd, BLKPBSZGET, &dev->phys_block_size) < 0) {
log_sys_error("ioctl BLKPBSZGET", name);
r = 0;
goto out;
@@ -178,7 +189,7 @@ int dev_get_block_size(struct device *dev, unsigned int *physical_block_size, un
#elif defined (BLKSSZGET)
/* if we can't get physical block size, just use logical block size instead */
if (dev->phys_block_size == -1) {
- if (ioctl(dev_fd(dev), BLKSSZGET, &dev->phys_block_size) < 0) {
+ if (ioctl(fd, BLKSSZGET, &dev->phys_block_size) < 0) {
log_sys_error("ioctl BLKSSZGET", name);
r = 0;
goto out;
@@ -196,7 +207,7 @@ int dev_get_block_size(struct device *dev, unsigned int *physical_block_size, un
*physical_block_size = (unsigned int) dev->phys_block_size;
*block_size = (unsigned int) dev->block_size;
out:
- if (needs_open && !dev_close_immediate(dev))
+ if (do_close && !dev_close_immediate(dev))
stack;
return r;
5 years, 5 months
master - Add dm-writecache support
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=3ae55695708b5d702f2...
Commit: 3ae55695708b5d702f21daf776607d30cebe69c3
Parent: cac4a9743acb826d785c0e51e9a752d8959ced80
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Mon Aug 27 14:53:09 2018 -0500
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Tue Nov 6 14:18:41 2018 -0600
Add dm-writecache support
dm-writecache is used like dm-cache with a standard LV
as the cache.
$ lvcreate -n main -L 128M -an foo /dev/loop0
$ lvcreate -n fast -L 32M -an foo /dev/pmem0
$ lvconvert --type writecache --cachepool fast foo/main
$ lvs -a foo -o+devices
LV VG Attr LSize Origin Devices
[fast] foo -wi------- 32.00m /dev/pmem0(0)
main foo Cwi------- 128.00m [main_wcorig] main_wcorig(0)
[main_wcorig] foo -wi------- 128.00m /dev/loop0(0)
$ lvchange -ay foo/main
$ dmsetup table
foo-main_wcorig: 0 262144 linear 7:0 2048
foo-main: 0 262144 writecache p 253:4 253:3 4096 0
foo-fast: 0 65536 linear 259:0 2048
$ lvchange -an foo/main
$ lvconvert --splitcache foo/main
$ lvs -a foo -o+devices
LV VG Attr LSize Devices
fast foo -wi------- 32.00m /dev/pmem0(0)
main foo -wi------- 128.00m /dev/loop0(0)
---
configure.ac | 18 ++
device_mapper/all.h | 48 +++
device_mapper/libdm-deptree.c | 134 ++++++++
device_mapper/libdm-targets.c | 32 ++
lib/Makefile.in | 1 +
lib/activate/activate.c | 20 ++
lib/activate/activate.h | 5 +
lib/activate/dev_manager.c | 47 +++
lib/activate/dev_manager.h | 3 +
lib/commands/toolcontext.c | 5 +
lib/device/dev-type.c | 42 +++
lib/device/dev-type.h | 2 +
lib/format_text/flags.c | 1 +
lib/metadata/lv.c | 4 +-
lib/metadata/lv_manip.c | 10 +
lib/metadata/merge.c | 2 +-
lib/metadata/metadata-exported.h | 8 +
lib/metadata/metadata.c | 35 +++
lib/metadata/segtype.h | 6 +
lib/writecache/writecache.c | 314 +++++++++++++++++++
test/shell/writecache.sh | 129 ++++++++
tools/args.h | 3 +
tools/command-lines.in | 24 +-
tools/lv_types.h | 1 +
tools/lvconvert.c | 621 +++++++++++++++++++++++++++++++++++---
tools/lvmcmdline.c | 7 +-
tools/toollib.c | 13 +-
tools/tools.h | 3 +-
28 files changed, 1481 insertions(+), 57 deletions(-)
diff --git a/configure.ac b/configure.ac
index 70fd674..8549a80 100644
--- a/configure.ac
+++ b/configure.ac
@@ -643,6 +643,24 @@ AC_DEFINE_UNQUOTED([VDO_FORMAT_CMD], ["$VDO_FORMAT_CMD"],
#AC_MSG_RESULT($VDO_LIB)
################################################################################
+dnl -- writecache inclusion type
+AC_MSG_CHECKING(whether to include writecache)
+AC_ARG_WITH(writecache,
+ AC_HELP_STRING([--with-writecache=TYPE],
+ [writecache support: internal/none [internal]]),
+ WRITECACHE=$withval, WRITECACHE="none")
+
+AC_MSG_RESULT($WRITECACHE)
+
+case "$WRITECACHE" in
+ none) ;;
+ internal)
+ AC_DEFINE([WRITECACHE_INTERNAL], 1, [Define to 1 to include built-in support for writecache.])
+ ;;
+ *) AC_MSG_ERROR([--with-writecache parameter invalid]) ;;
+esac
+
+################################################################################
dnl -- Disable readline
AC_ARG_ENABLE([readline],
AC_HELP_STRING([--disable-readline], [disable readline support]),
diff --git a/device_mapper/all.h b/device_mapper/all.h
index 0f01075..6fe80f8 100644
--- a/device_mapper/all.h
+++ b/device_mapper/all.h
@@ -378,6 +378,16 @@ struct dm_status_cache {
int dm_get_status_cache(struct dm_pool *mem, const char *params,
struct dm_status_cache **status);
+struct dm_status_writecache {
+ uint32_t error;
+ uint64_t total_blocks;
+ uint64_t free_blocks;
+ uint64_t writeback_blocks;
+};
+
+int dm_get_status_writecache(struct dm_pool *mem, const char *params,
+ struct dm_status_writecache **status);
+
/*
* Parse params from STATUS call for snapshot target
*
@@ -918,6 +928,44 @@ int dm_tree_node_add_cache_target(struct dm_tree_node *node,
uint64_t data_len,
uint32_t data_block_size);
+struct writecache_settings {
+ uint64_t high_watermark;
+ uint64_t low_watermark;
+ uint64_t writeback_jobs;
+ uint64_t autocommit_blocks;
+ uint64_t autocommit_time; /* in milliseconds */
+ uint32_t fua;
+ uint32_t nofua;
+
+ /*
+ * Allow an unrecognized key and its val to be passed to the kernel for
+ * cases where a new kernel setting is added but lvm doesn't know about
+ * it yet.
+ */
+ char *new_key;
+ char *new_val;
+
+ /*
+ * Flag is 1 if a value has been set.
+ */
+ unsigned high_watermark_set:1;
+ unsigned low_watermark_set:1;
+ unsigned writeback_jobs_set:1;
+ unsigned autocommit_blocks_set:1;
+ unsigned autocommit_time_set:1;
+ unsigned fua_set:1;
+ unsigned nofua_set:1;
+};
+
+int dm_tree_node_add_writecache_target(struct dm_tree_node *node,
+ uint64_t size,
+ const char *origin_uuid,
+ const char *cache_uuid,
+ int pmem,
+ uint32_t writecache_block_size,
+ struct writecache_settings *settings);
+
+
/*
* VDO target
*/
diff --git a/device_mapper/libdm-deptree.c b/device_mapper/libdm-deptree.c
index 13239c7..89a0a48 100644
--- a/device_mapper/libdm-deptree.c
+++ b/device_mapper/libdm-deptree.c
@@ -37,6 +37,7 @@ enum {
SEG_SNAPSHOT_MERGE,
SEG_STRIPED,
SEG_ZERO,
+ SEG_WRITECACHE,
SEG_THIN_POOL,
SEG_THIN,
SEG_VDO,
@@ -76,6 +77,7 @@ static const struct {
{ SEG_SNAPSHOT_MERGE, "snapshot-merge" },
{ SEG_STRIPED, "striped" },
{ SEG_ZERO, "zero"},
+ { SEG_WRITECACHE, "writecache"},
{ SEG_THIN_POOL, "thin-pool"},
{ SEG_THIN, "thin"},
{ SEG_VDO, "vdo" },
@@ -212,6 +214,11 @@ struct load_segment {
struct dm_tree_node *vdo_data; /* VDO */
struct dm_vdo_target_params vdo_params; /* VDO */
const char *vdo_name; /* VDO - device name is ALSO passed as table arg */
+
+ struct dm_tree_node *writecache_node; /* writecache */
+ int writecache_pmem; /* writecache, 1 if pmem, 0 if ssd */
+ uint32_t writecache_block_size; /* writecache, in bytes */
+ struct writecache_settings writecache_settings; /* writecache */
};
/* Per-device properties */
@@ -2605,6 +2612,88 @@ static int _cache_emit_segment_line(struct dm_task *dmt,
return 1;
}
+static int _writecache_emit_segment_line(struct dm_task *dmt,
+ struct load_segment *seg,
+ char *params, size_t paramsize)
+{
+ int pos = 0;
+ int count = 0;
+ uint32_t block_size;
+ char origin_dev[DM_FORMAT_DEV_BUFSIZE];
+ char cache_dev[DM_FORMAT_DEV_BUFSIZE];
+
+ if (!_build_dev_string(origin_dev, sizeof(origin_dev), seg->origin))
+ return_0;
+
+ if (!_build_dev_string(cache_dev, sizeof(cache_dev), seg->writecache_node))
+ return_0;
+
+ if (seg->writecache_settings.high_watermark_set)
+ count += 2;
+ if (seg->writecache_settings.low_watermark_set)
+ count += 2;
+ if (seg->writecache_settings.writeback_jobs_set)
+ count += 2;
+ if (seg->writecache_settings.autocommit_blocks_set)
+ count += 2;
+ if (seg->writecache_settings.autocommit_time_set)
+ count += 2;
+ if (seg->writecache_settings.fua_set)
+ count += 1;
+ if (seg->writecache_settings.nofua_set)
+ count += 1;
+ if (seg->writecache_settings.new_key)
+ count += 2;
+
+ if (!(block_size = seg->writecache_block_size))
+ block_size = 4096;
+
+ EMIT_PARAMS(pos, "%s %s %s %u %d",
+ seg->writecache_pmem ? "p" : "s",
+ origin_dev, cache_dev, block_size, count);
+
+ if (seg->writecache_settings.high_watermark_set) {
+ EMIT_PARAMS(pos, " high_watermark %llu",
+ (unsigned long long)seg->writecache_settings.high_watermark);
+ }
+
+ if (seg->writecache_settings.low_watermark_set) {
+ EMIT_PARAMS(pos, " low_watermark %llu",
+ (unsigned long long)seg->writecache_settings.low_watermark);
+ }
+
+ if (seg->writecache_settings.writeback_jobs_set) {
+ EMIT_PARAMS(pos, " writeback_jobs %llu",
+ (unsigned long long)seg->writecache_settings.writeback_jobs);
+ }
+
+ if (seg->writecache_settings.autocommit_blocks_set) {
+ EMIT_PARAMS(pos, " autocommit_blocks %llu",
+ (unsigned long long)seg->writecache_settings.autocommit_blocks);
+ }
+
+ if (seg->writecache_settings.autocommit_time_set) {
+ EMIT_PARAMS(pos, " autocommit_time %llu",
+ (unsigned long long)seg->writecache_settings.autocommit_time);
+ }
+
+ if (seg->writecache_settings.fua_set) {
+ EMIT_PARAMS(pos, " fua");
+ }
+
+ if (seg->writecache_settings.nofua_set) {
+ EMIT_PARAMS(pos, " nofua");
+ }
+
+ if (seg->writecache_settings.new_key) {
+ EMIT_PARAMS(pos, " %s %s",
+ seg->writecache_settings.new_key,
+ seg->writecache_settings.new_val);
+ }
+
+ return 1;
+}
+
static int _thin_pool_emit_segment_line(struct dm_task *dmt,
struct load_segment *seg,
char *params, size_t paramsize)
@@ -2784,6 +2873,10 @@ static int _emit_segment_line(struct dm_task *dmt, uint32_t major,
if (!_cache_emit_segment_line(dmt, seg, params, paramsize))
return_0;
break;
+ case SEG_WRITECACHE:
+ if (!_writecache_emit_segment_line(dmt, seg, params, paramsize))
+ return_0;
+ break;
}
switch(seg->type) {
@@ -2795,6 +2888,7 @@ static int _emit_segment_line(struct dm_task *dmt, uint32_t major,
case SEG_THIN_POOL:
case SEG_THIN:
case SEG_CACHE:
+ case SEG_WRITECACHE:
break;
case SEG_CRYPT:
case SEG_LINEAR:
@@ -3583,6 +3677,46 @@ int dm_tree_node_add_cache_target(struct dm_tree_node *node,
return 1;
}
+int dm_tree_node_add_writecache_target(struct dm_tree_node *node,
+ uint64_t size,
+ const char *origin_uuid,
+ const char *cache_uuid,
+ int pmem,
+ uint32_t writecache_block_size,
+ struct writecache_settings *settings)
+{
+ struct load_segment *seg;
+
+ if (!(seg = _add_segment(node, SEG_WRITECACHE, size)))
+ return_0;
+
+ seg->writecache_pmem = pmem;
+ seg->writecache_block_size = writecache_block_size;
+
+ if (!(seg->writecache_node = dm_tree_find_node_by_uuid(node->dtree, cache_uuid))) {
+ log_error("Missing writecache's cache uuid %s.", cache_uuid);
+ return 0;
+ }
+ if (!_link_tree_nodes(node, seg->writecache_node))
+ return_0;
+
+ if (!(seg->origin = dm_tree_find_node_by_uuid(node->dtree, origin_uuid))) {
+ log_error("Missing writecache's origin uuid %s.", origin_uuid);
+ return 0;
+ }
+ if (!_link_tree_nodes(node, seg->origin))
+ return_0;
+
+ memcpy(&seg->writecache_settings, settings, sizeof(struct writecache_settings));
+
+ if (settings->new_key && settings->new_val) {
+ seg->writecache_settings.new_key = dm_pool_strdup(node->dtree->mem, settings->new_key);
+ seg->writecache_settings.new_val = dm_pool_strdup(node->dtree->mem, settings->new_val);
+ }
+
+ return 1;
+}
+
int dm_tree_node_add_replicator_target(struct dm_tree_node *node,
uint64_t size,
const char *rlog_uuid,
diff --git a/device_mapper/libdm-targets.c b/device_mapper/libdm-targets.c
index 5ab4701..607f429 100644
--- a/device_mapper/libdm-targets.c
+++ b/device_mapper/libdm-targets.c
@@ -346,6 +346,38 @@ bad:
return 0;
}
+/*
+ * From linux/Documentation/device-mapper/writecache.txt
+ *
+ * Status:
+ * 1. error indicator - 0 if there was no error, otherwise error number
+ * 2. the number of blocks
+ * 3. the number of free blocks
+ * 4. the number of blocks under writeback
+ */
+
+int dm_get_status_writecache(struct dm_pool *mem, const char *params,
+ struct dm_status_writecache **status)
+{
+ struct dm_status_writecache *s;
+
+ if (!(s = dm_pool_zalloc(mem, sizeof(struct dm_status_writecache))))
+ return_0;
+
+ if (sscanf(params, "%u %llu %llu %llu",
+ &s->error,
+ (unsigned long long *)&s->total_blocks,
+ (unsigned long long *)&s->free_blocks,
+ (unsigned long long *)&s->writeback_blocks) != 4) {
+ log_error("Failed to parse writecache params: %s.", params);
+ dm_pool_free(mem, s);
+ return 0;
+ }
+
+ *status = s;
+ return 1;
+}
+
int parse_thin_pool_status(const char *params, struct dm_status_thin_pool *s)
{
int pos;
diff --git a/lib/Makefile.in b/lib/Makefile.in
index 1b170ee..722e954 100644
--- a/lib/Makefile.in
+++ b/lib/Makefile.in
@@ -19,6 +19,7 @@ top_builddir = @top_builddir@
SOURCES =\
activate/activate.c \
cache/lvmcache.c \
+ writecache/writecache.c \
cache_segtype/cache.c \
commands/toolcontext.c \
config/config.c \
diff --git a/lib/activate/activate.c b/lib/activate/activate.c
index 1e195b6..b1f7391 100644
--- a/lib/activate/activate.c
+++ b/lib/activate/activate.c
@@ -1173,6 +1173,26 @@ out:
return r;
}
+int lv_writecache_message(const struct logical_volume *lv, const char *msg)
+{
+ int r = 0;
+ struct dev_manager *dm;
+
+ if (!lv_info(lv->vg->cmd, lv, 0, NULL, 0, 0)) {
+ log_error("Unable to send message to an inactive logical volume.");
+ return 0;
+ }
+
+ if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
+ return_0;
+
+ r = dev_manager_writecache_message(dm, lv, msg);
+
+ dev_manager_destroy(dm);
+
+ return r;
+}
+
/*
* Return dm_status_cache for cache volume, accept also cache pool
*
diff --git a/lib/activate/activate.h b/lib/activate/activate.h
index 9530c36..8f9c918 100644
--- a/lib/activate/activate.h
+++ b/lib/activate/activate.h
@@ -38,6 +38,7 @@ typedef enum {
SEG_STATUS_THIN,
SEG_STATUS_THIN_POOL,
SEG_STATUS_VDO_POOL,
+ SEG_STATUS_WRITECACHE,
SEG_STATUS_UNKNOWN
} lv_seg_status_type_t;
@@ -51,6 +52,7 @@ struct lv_seg_status {
struct dm_status_snapshot *snapshot;
struct dm_status_thin *thin;
struct dm_status_thin_pool *thin_pool;
+ struct dm_status_writecache *writecache;
struct lv_status_vdo vdo_pool;
};
};
@@ -184,6 +186,7 @@ int lv_raid_dev_health(const struct logical_volume *lv, char **dev_health);
int lv_raid_mismatch_count(const struct logical_volume *lv, uint64_t *cnt);
int lv_raid_sync_action(const struct logical_volume *lv, char **sync_action);
int lv_raid_message(const struct logical_volume *lv, const char *msg);
+int lv_writecache_message(const struct logical_volume *lv, const char *msg);
int lv_cache_status(const struct logical_volume *cache_lv,
struct lv_status_cache **status);
int lv_thin_pool_percent(const struct logical_volume *lv, int metadata,
@@ -255,6 +258,7 @@ int device_is_usable(struct device *dev, struct dev_usable_check_params check);
void fs_unlock(void);
#define TARGET_NAME_CACHE "cache"
+#define TARGET_NAME_WRITECACHE "writecache"
#define TARGET_NAME_ERROR "error"
#define TARGET_NAME_ERROR_OLD "erro" /* Truncated in older kernels */
#define TARGET_NAME_LINEAR "linear"
@@ -271,6 +275,7 @@ void fs_unlock(void);
#define MODULE_NAME_CLUSTERED_MIRROR "clog"
#define MODULE_NAME_CACHE TARGET_NAME_CACHE
+#define MODULE_NAME_WRITECACHE TARGET_NAME_WRITECACHE
#define MODULE_NAME_ERROR TARGET_NAME_ERROR
#define MODULE_NAME_LOG_CLUSTERED "log-clustered"
#define MODULE_NAME_LOG_USERSPACE "log-userspace"
diff --git a/lib/activate/dev_manager.c b/lib/activate/dev_manager.c
index 1249581..49f425b 100644
--- a/lib/activate/dev_manager.c
+++ b/lib/activate/dev_manager.c
@@ -213,6 +213,10 @@ static int _get_segment_status_from_target_params(const char *target_name,
if (!parse_vdo_pool_status(seg_status->mem, seg->lv, params, &seg_status->vdo_pool))
return_0;
seg_status->type = SEG_STATUS_VDO_POOL;
+ } else if (segtype_is_writecache(segtype)) {
+ if (!dm_get_status_writecache(seg_status->mem, params, &(seg_status->writecache)))
+ return_0;
+ seg_status->type = SEG_STATUS_WRITECACHE;
} else
/*
* TODO: Add support for other segment types too!
@@ -1557,6 +1561,40 @@ out:
return r;
}
+int dev_manager_writecache_message(struct dev_manager *dm,
+ const struct logical_volume *lv,
+ const char *msg)
+{
+ int r = 0;
+ const char *dlid;
+ struct dm_task *dmt;
+ const char *layer = lv_layer(lv);
+
+ if (!lv_is_writecache(lv)) {
+ log_error(INTERNAL_ERROR "%s is not a writecache logical volume.",
+ display_lvname(lv));
+ return 0;
+ }
+
+ if (!(dlid = build_dm_uuid(dm->mem, lv, layer)))
+ return_0;
+
+ if (!(dmt = _setup_task_run(DM_DEVICE_TARGET_MSG, NULL, NULL, dlid, 0, 0, 0, 0, 1, 0)))
+ return_0;
+
+ if (!dm_task_set_message(dmt, msg))
+ goto_out;
+
+ if (!dm_task_run(dmt))
+ goto_out;
+
+ r = 1;
+out:
+ dm_task_destroy(dmt);
+
+ return r;
+}
+
int dev_manager_cache_status(struct dev_manager *dm,
const struct logical_volume *lv,
struct lv_status_cache **status)
@@ -2601,6 +2639,10 @@ static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
if (seg->metadata_lv &&
!_add_lv_to_dtree(dm, dtree, seg->metadata_lv, 0))
return_0;
+ if (seg->writecache && seg_is_writecache(seg)) {
+ if (!_add_lv_to_dtree(dm, dtree, seg->writecache, dm->activation ? origin_only : 1))
+ return_0;
+ }
if (seg->pool_lv &&
(lv_is_cache_pool(seg->pool_lv) || lv_is_cache_single(seg->pool_lv) || dm->track_external_lv_deps) &&
/* When activating and not origin_only detect linear 'overlay' over pool */
@@ -3053,6 +3095,11 @@ static int _add_segment_to_dtree(struct dev_manager *dm,
lv_layer(seg->pool_lv)))
return_0;
+ if (seg->writecache && !laopts->origin_only &&
+ !_add_new_lv_to_dtree(dm, dtree, seg->writecache, laopts,
+ lv_layer(seg->writecache)))
+ return_0;
+
/* Add any LVs used by this segment */
for (s = 0; s < seg->area_count; ++s) {
if ((seg_type(seg, s) == AREA_LV) &&
diff --git a/lib/activate/dev_manager.h b/lib/activate/dev_manager.h
index b669bd2..ca8c0d3 100644
--- a/lib/activate/dev_manager.h
+++ b/lib/activate/dev_manager.h
@@ -63,6 +63,9 @@ int dev_manager_raid_status(struct dev_manager *dm,
int dev_manager_raid_message(struct dev_manager *dm,
const struct logical_volume *lv,
const char *msg);
+int dev_manager_writecache_message(struct dev_manager *dm,
+ const struct logical_volume *lv,
+ const char *msg);
int dev_manager_cache_status(struct dev_manager *dm,
const struct logical_volume *lv,
struct lv_status_cache **status);
diff --git a/lib/commands/toolcontext.c b/lib/commands/toolcontext.c
index 9f00593..58d46ac 100644
--- a/lib/commands/toolcontext.c
+++ b/lib/commands/toolcontext.c
@@ -1367,6 +1367,11 @@ static int _init_segtypes(struct cmd_context *cmd)
return_0;
#endif
+#ifdef WRITECACHE_INTERNAL
+ if (!init_writecache_segtypes(cmd, &seglib))
+ return 0;
+#endif
+
#ifdef HAVE_LIBDL
/* Load any formats in shared libs unless static */
if (!is_static() &&
diff --git a/lib/device/dev-type.c b/lib/device/dev-type.c
index f2d193b..1f74fdc 100644
--- a/lib/device/dev-type.c
+++ b/lib/device/dev-type.c
@@ -35,6 +35,48 @@
#include "lib/device/device-types.h"
+/*
+ * dev is pmem if /sys/dev/block/<major>:<minor>/queue/dax is 1
+ */
+
+int dev_is_pmem(struct device *dev)
+{
+ FILE *fp;
+ char path[PATH_MAX];
+ char buffer[64];
+ int is_pmem = 0;
+
+ if (dm_snprintf(path, sizeof(path), "%sdev/block/%d:%d/queue/dax",
+ dm_sysfs_dir(),
+ (int) MAJOR(dev->dev),
+ (int) MINOR(dev->dev)) < 0) {
+ log_warn("Sysfs path for %s dax is too long.", dev_name(dev));
+ return 0;
+ }
+
+ if (!(fp = fopen(path, "r")))
+ return 0;
+
+ if (!fgets(buffer, sizeof(buffer), fp)) {
+ log_warn("Failed to read %s.", path);
+ fclose(fp);
+ return 0;
+ } else if (sscanf(buffer, "%d", &is_pmem) != 1) {
+ log_warn("Failed to parse %s '%s'.", path, buffer);
+ fclose(fp);
+ return 0;
+ }
+
+ fclose(fp);
+
+ if (is_pmem) {
+ log_debug("%s is pmem", dev_name(dev));
+ return 1;
+ }
+
+ return 0;
+}
+
struct dev_types *create_dev_types(const char *proc_dir,
const struct dm_config_node *cn)
{
diff --git a/lib/device/dev-type.h b/lib/device/dev-type.h
index 0e418d6..e8f0fcb 100644
--- a/lib/device/dev-type.h
+++ b/lib/device/dev-type.h
@@ -92,4 +92,6 @@ unsigned long dev_discard_granularity(struct dev_types *dt, struct device *dev);
int dev_is_rotational(struct dev_types *dt, struct device *dev);
+int dev_is_pmem(struct device *dev);
+
#endif
diff --git a/lib/format_text/flags.c b/lib/format_text/flags.c
index d7c4318..cf5be00 100644
--- a/lib/format_text/flags.c
+++ b/lib/format_text/flags.c
@@ -102,6 +102,7 @@ static const struct flag _lv_flags[] = {
{LV_VDO, NULL, 0},
{LV_VDO_POOL, NULL, 0},
{LV_VDO_POOL_DATA, NULL, 0},
+ {WRITECACHE, NULL, 0},
{LV_PENDING_DELETE, NULL, 0}, /* FIXME Display like COMPATIBLE_FLAG */
{LV_REMOVED, NULL, 0},
{0, NULL, 0}
diff --git a/lib/metadata/lv.c b/lib/metadata/lv.c
index cb064d8..0e54323 100644
--- a/lib/metadata/lv.c
+++ b/lib/metadata/lv.c
@@ -578,6 +578,8 @@ struct logical_volume *lv_origin_lv(const struct logical_volume *lv)
origin = first_seg(lv)->origin;
else if (lv_is_thin_volume(lv) && first_seg(lv)->external_lv)
origin = first_seg(lv)->external_lv;
+ else if (lv_is_writecache(lv) && first_seg(lv)->origin)
+ origin = first_seg(lv)->origin;
return origin;
}
@@ -1192,7 +1194,7 @@ char *lv_attr_dup_with_info_and_seg_status(struct dm_pool *mem, const struct lv_
lv_is_pool_metadata_spare(lv) ||
lv_is_raid_metadata(lv))
repstr[0] = 'e';
- else if (lv_is_cache_type(lv))
+ else if (lv_is_cache_type(lv) || lv_is_writecache(lv))
repstr[0] = 'C';
else if (lv_is_raid(lv))
repstr[0] = (lv_is_not_synced(lv)) ? 'R' : 'r';
diff --git a/lib/metadata/lv_manip.c b/lib/metadata/lv_manip.c
index 8e64dac..d5603c7 100644
--- a/lib/metadata/lv_manip.c
+++ b/lib/metadata/lv_manip.c
@@ -1572,6 +1572,11 @@ int lv_reduce(struct logical_volume *lv, uint32_t extents)
{
struct lv_segment *seg = first_seg(lv);
+ if (lv_is_writecache(lv)) {
+ log_error("Remove not yet allowed on LVs with writecache attached.");
+ return 0;
+ }
+
/* Ensure stripe boundary extents on RAID LVs */
if (lv_is_raid(lv) && extents != lv->le_count)
extents =_round_to_stripe_boundary(lv->vg, extents,
@@ -5562,6 +5567,11 @@ int lv_resize(struct logical_volume *lv,
int ret = 0;
int status;
+ if (lv_is_writecache(lv)) {
+ log_error("Resize not yet allowed on LVs with writecache attached.");
+ return 0;
+ }
+
if (!_lvresize_check(lv, lp))
return_0;
diff --git a/lib/metadata/merge.c b/lib/metadata/merge.c
index d95da1f..bdd9c67 100644
--- a/lib/metadata/merge.c
+++ b/lib/metadata/merge.c
@@ -710,7 +710,7 @@ int check_lv_segments(struct logical_volume *lv, int complete_vg)
}
if (seg->log_lv == lv)
seg_found++;
- if (seg->metadata_lv == lv || seg->pool_lv == lv)
+ if (seg->metadata_lv == lv || seg->pool_lv == lv || seg->writecache == lv)
seg_found++;
if (seg_is_thin_volume(seg) && (seg->origin == lv || seg->external_lv == lv))
seg_found++;
diff --git a/lib/metadata/metadata-exported.h b/lib/metadata/metadata-exported.h
index 30ab356..7f673cd 100644
--- a/lib/metadata/metadata-exported.h
+++ b/lib/metadata/metadata-exported.h
@@ -95,6 +95,7 @@
#define MERGING UINT64_C(0x0000000010000000) /* LV SEG */
#define UNLABELLED_PV UINT64_C(0x0000000080000000) /* PV -this PV had no label written yet */
+#define WRITECACHE UINT64_C(0x0000000080000000) /* LV - shared with UNLABELLED_PV */
#define RAID UINT64_C(0x0000000100000000) /* LV - Internal use only */
#define RAID_META UINT64_C(0x0000000200000000) /* LV - Internal use only */
@@ -258,6 +259,7 @@
#define lv_is_pool_metadata(lv) (((lv)->status & (CACHE_POOL_METADATA | THIN_POOL_METADATA)) ? 1 : 0)
#define lv_is_pool_metadata_spare(lv) (((lv)->status & POOL_METADATA_SPARE) ? 1 : 0)
#define lv_is_lockd_sanlock_lv(lv) (((lv)->status & LOCKD_SANLOCK_LV) ? 1 : 0)
+#define lv_is_writecache(lv) (((lv)->status & WRITECACHE) ? 1 : 0)
#define lv_is_vdo(lv) (((lv)->status & LV_VDO) ? 1 : 0)
#define lv_is_vdo_pool(lv) (((lv)->status & LV_VDO_POOL) ? 1 : 0)
@@ -509,6 +511,10 @@ struct lv_segment {
struct dm_config_node *policy_settings; /* For cache_pool */
unsigned cleaner_policy; /* For cache */
+ struct logical_volume *writecache; /* For writecache */
+ uint32_t writecache_block_size; /* For writecache */
+ struct writecache_settings writecache_settings; /* For writecache */
+
struct dm_vdo_target_params vdo_params; /* For VDO-pool */
uint32_t vdo_pool_header_size; /* For VDO-pool */
uint32_t vdo_pool_virtual_extents; /* For VDO-pool */
@@ -1360,4 +1366,6 @@ int is_system_id_allowed(struct cmd_context *cmd, const char *system_id);
int vg_strip_outdated_historical_lvs(struct volume_group *vg);
+int lv_on_pmem(struct logical_volume *lv);
+
#endif
diff --git a/lib/metadata/metadata.c b/lib/metadata/metadata.c
index 57e1842..6c71c74 100644
--- a/lib/metadata/metadata.c
+++ b/lib/metadata/metadata.c
@@ -5545,3 +5545,38 @@ int vg_strip_outdated_historical_lvs(struct volume_group *vg) {
return 1;
}
+
+int lv_on_pmem(struct logical_volume *lv)
+{
+ struct lv_segment *seg;
+ struct physical_volume *pv;
+ uint32_t s;
+ int pmem_devs = 0, other_devs = 0;
+
+ dm_list_iterate_items(seg, &lv->segments) {
+ for (s = 0; s < seg->area_count; s++) {
+ pv = seg_pv(seg, s);
+
+ if (dev_is_pmem(pv->dev)) {
+ log_debug("LV %s dev %s is pmem.", lv->name, dev_name(pv->dev));
+ pmem_devs++;
+ } else {
+ log_debug("LV %s dev %s not pmem.", lv->name, dev_name(pv->dev));
+ other_devs++;
+ }
+ }
+ }
+
+ if (pmem_devs && other_devs) {
+ log_error("Invalid mix of cache device types in %s.", display_lvname(lv));
+ return -1;
+ }
+
+ if (pmem_devs) {
+ log_debug("LV %s on pmem", lv->name);
+ return 1;
+ }
+
+ return 0;
+}
+
diff --git a/lib/metadata/segtype.h b/lib/metadata/segtype.h
index 6fdf075..22a511e 100644
--- a/lib/metadata/segtype.h
+++ b/lib/metadata/segtype.h
@@ -66,6 +66,7 @@ struct dev_manager;
#define SEG_RAID6_RS_6 (1ULL << 34)
#define SEG_RAID6_N_6 (1ULL << 35)
#define SEG_RAID6 SEG_RAID6_ZR
+#define SEG_WRITECACHE (1ULL << 36)
#define SEG_STRIPED_TARGET (1ULL << 39)
#define SEG_LINEAR_TARGET (1ULL << 40)
@@ -82,6 +83,7 @@ struct dev_manager;
#define SEG_TYPE_NAME_THIN_POOL "thin-pool"
#define SEG_TYPE_NAME_CACHE "cache"
#define SEG_TYPE_NAME_CACHE_POOL "cache-pool"
+#define SEG_TYPE_NAME_WRITECACHE "writecache"
#define SEG_TYPE_NAME_ERROR "error"
#define SEG_TYPE_NAME_FREE "free"
#define SEG_TYPE_NAME_ZERO "zero"
@@ -114,6 +116,7 @@ struct dev_manager;
#define segtype_is_striped_target(segtype) ((segtype)->flags & SEG_STRIPED_TARGET ? 1 : 0)
#define segtype_is_cache(segtype) ((segtype)->flags & SEG_CACHE ? 1 : 0)
#define segtype_is_cache_pool(segtype) ((segtype)->flags & SEG_CACHE_POOL ? 1 : 0)
+#define segtype_is_writecache(segtype) ((segtype)->flags & SEG_WRITECACHE ? 1 : 0)
#define segtype_is_mirrored(segtype) ((segtype)->flags & SEG_AREAS_MIRRORED ? 1 : 0)
#define segtype_is_mirror(segtype) ((segtype)->flags & SEG_MIRROR ? 1 : 0)
#define segtype_is_pool(segtype) ((segtype)->flags & (SEG_CACHE_POOL | SEG_THIN_POOL) ? 1 : 0)
@@ -175,6 +178,7 @@ struct dev_manager;
#define seg_is_striped_target(seg) segtype_is_striped_target((seg)->segtype)
#define seg_is_cache(seg) segtype_is_cache((seg)->segtype)
#define seg_is_cache_pool(seg) segtype_is_cache_pool((seg)->segtype)
+#define seg_is_writecache(seg) segtype_is_writecache((seg)->segtype)
#define seg_is_used_cache_pool(seg) (seg_is_cache_pool(seg) && (!dm_list_empty(&(seg->lv)->segs_using_this_lv)))
#define seg_is_linear(seg) (seg_is_striped(seg) && ((seg)->area_count == 1))
#define seg_is_mirror(seg) segtype_is_mirror((seg)->segtype)
@@ -341,6 +345,8 @@ int init_cache_segtypes(struct cmd_context *cmd, struct segtype_library *seglib)
int init_vdo_segtypes(struct cmd_context *cmd, struct segtype_library *seglib);
#endif
+int init_writecache_segtypes(struct cmd_context *cmd, struct segtype_library *seglib);
+
#define CACHE_FEATURE_POLICY_MQ (1U << 0)
#define CACHE_FEATURE_POLICY_SMQ (1U << 1)
#define CACHE_FEATURE_METADATA2 (1U << 2)
diff --git a/lib/writecache/writecache.c b/lib/writecache/writecache.c
new file mode 100644
index 0000000..e9d337b
--- /dev/null
+++ b/lib/writecache/writecache.c
@@ -0,0 +1,314 @@
+/*
+ * Copyright (C) 2013-2016 Red Hat, Inc. All rights reserved.
+ *
+ * This file is part of LVM2.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU Lesser General Public License v.2.1.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "base/memory/zalloc.h"
+#include "lib/misc/lib.h"
+#include "lib/commands/toolcontext.h"
+#include "lib/metadata/segtype.h"
+#include "lib/display/display.h"
+#include "lib/format_text/text_export.h"
+#include "lib/config/config.h"
+#include "lib/datastruct/str_list.h"
+#include "lib/misc/lvm-string.h"
+#include "lib/activate/activate.h"
+#include "lib/metadata/metadata.h"
+#include "lib/metadata/lv_alloc.h"
+#include "lib/config/defaults.h"
+
+static const char _writecache_module[] = "writecache";
+
+#define SEG_LOG_ERROR(t, p...) \
+ log_error(t " segment %s of logical volume %s.", ## p, \
+ dm_config_parent_name(sn), seg->lv->name), 0;
+
+static void _writecache_display(const struct lv_segment *seg)
+{
+ /* TODO: lvdisplay segments */
+}
+
+static int _writecache_text_import(struct lv_segment *seg,
+ const struct dm_config_node *sn,
+ struct dm_hash_table *pv_hash __attribute__((unused)))
+{
+ struct logical_volume *origin_lv = NULL;
+ struct logical_volume *fast_lv;
+ const char *origin_name = NULL;
+ const char *fast_name = NULL;
+
+ if (!dm_config_has_node(sn, "origin"))
+ return SEG_LOG_ERROR("origin not specified in");
+
+ if (!dm_config_get_str(sn, "origin", &origin_name))
+ return SEG_LOG_ERROR("origin must be a string in");
+
+ if (!(origin_lv = find_lv(seg->lv->vg, origin_name)))
+ return SEG_LOG_ERROR("Unknown LV specified for writecache origin %s in", origin_name);
+
+ if (!set_lv_segment_area_lv(seg, 0, origin_lv, 0, 0))
+ return_0;
+
+ if (!dm_config_has_node(sn, "writecache"))
+ return SEG_LOG_ERROR("writecache not specified in");
+
+ if (!dm_config_get_str(sn, "writecache", &fast_name))
+ return SEG_LOG_ERROR("writecache must be a string in");
+
+ if (!(fast_lv = find_lv(seg->lv->vg, fast_name)))
+ return SEG_LOG_ERROR("Unknown logical volume %s specified for writecache in",
+ fast_name);
+
+ if (!dm_config_get_uint32(sn, "writecache_block_size", &seg->writecache_block_size))
+ return SEG_LOG_ERROR("writecache block_size must be set in");
+
+ seg->origin = origin_lv;
+ seg->writecache = fast_lv;
+ seg->lv->status |= WRITECACHE;
+
+ if (!add_seg_to_segs_using_this_lv(fast_lv, seg))
+ return_0;
+
+ memset(&seg->writecache_settings, 0, sizeof(struct writecache_settings));
+
+ if (dm_config_has_node(sn, "high_watermark")) {
+ if (!dm_config_get_uint64(sn, "high_watermark", &seg->writecache_settings.high_watermark))
+ return SEG_LOG_ERROR("Unknown writecache_setting in");
+ seg->writecache_settings.high_watermark_set = 1;
+ }
+
+ if (dm_config_has_node(sn, "low_watermark")) {
+ if (!dm_config_get_uint64(sn, "low_watermark", &seg->writecache_settings.low_watermark))
+ return SEG_LOG_ERROR("Unknown writecache_setting in");
+ seg->writecache_settings.low_watermark_set = 1;
+ }
+
+ if (dm_config_has_node(sn, "writeback_jobs")) {
+ if (!dm_config_get_uint64(sn, "writeback_jobs", &seg->writecache_settings.writeback_jobs))
+ return SEG_LOG_ERROR("Unknown writecache_setting in");
+ seg->writecache_settings.writeback_jobs_set = 1;
+ }
+
+ if (dm_config_has_node(sn, "autocommit_blocks")) {
+ if (!dm_config_get_uint64(sn, "autocommit_blocks", &seg->writecache_settings.autocommit_blocks))
+ return SEG_LOG_ERROR("Unknown writecache_setting in");
+ seg->writecache_settings.autocommit_blocks_set = 1;
+ }
+
+ if (dm_config_has_node(sn, "autocommit_time")) {
+ if (!dm_config_get_uint64(sn, "autocommit_time", &seg->writecache_settings.autocommit_time))
+ return SEG_LOG_ERROR("Unknown writecache_setting in");
+ seg->writecache_settings.autocommit_time_set = 1;
+ }
+
+ if (dm_config_has_node(sn, "fua")) {
+ if (!dm_config_get_uint32(sn, "fua", &seg->writecache_settings.fua))
+ return SEG_LOG_ERROR("Unknown writecache_setting in");
+ seg->writecache_settings.fua_set = 1;
+ }
+
+ if (dm_config_has_node(sn, "nofua")) {
+ if (!dm_config_get_uint32(sn, "nofua", &seg->writecache_settings.nofua))
+ return SEG_LOG_ERROR("Unknown writecache_setting in");
+ seg->writecache_settings.nofua_set = 1;
+ }
+
+ if (dm_config_has_node(sn, "writecache_setting_key")) {
+ const char *key;
+ const char *val;
+
+ if (!dm_config_get_str(sn, "writecache_setting_key", &key))
+ return SEG_LOG_ERROR("Unknown writecache_setting in");
+ if (!dm_config_get_str(sn, "writecache_setting_val", &val))
+ return SEG_LOG_ERROR("Unknown writecache_setting in");
+
+ seg->writecache_settings.new_key = dm_pool_strdup(seg->lv->vg->vgmem, key);
+ seg->writecache_settings.new_val = dm_pool_strdup(seg->lv->vg->vgmem, val);
+ }
+
+ return 1;
+}
+
+static int _writecache_text_import_area_count(const struct dm_config_node *sn,
+ uint32_t *area_count)
+{
+ *area_count = 1;
+
+ return 1;
+}
+
+static int _writecache_text_export(const struct lv_segment *seg,
+ struct formatter *f)
+{
+ outf(f, "writecache = \"%s\"", seg->writecache->name);
+ outf(f, "origin = \"%s\"", seg_lv(seg, 0)->name);
+ outf(f, "writecache_block_size = %u", seg->writecache_block_size);
+
+ if (seg->writecache_settings.high_watermark_set) {
+ outf(f, "high_watermark = %llu",
+ (unsigned long long)seg->writecache_settings.high_watermark);
+ }
+
+ if (seg->writecache_settings.low_watermark_set) {
+ outf(f, "low_watermark = %llu",
+ (unsigned long long)seg->writecache_settings.low_watermark);
+ }
+
+ if (seg->writecache_settings.writeback_jobs_set) {
+ outf(f, "writeback_jobs = %llu",
+ (unsigned long long)seg->writecache_settings.writeback_jobs);
+ }
+
+ if (seg->writecache_settings.autocommit_blocks_set) {
+ outf(f, "autocommit_blocks = %llu",
+ (unsigned long long)seg->writecache_settings.autocommit_blocks);
+ }
+
+ if (seg->writecache_settings.autocommit_time_set) {
+ outf(f, "autocommit_time = %llu",
+ (unsigned long long)seg->writecache_settings.autocommit_time);
+ }
+
+ if (seg->writecache_settings.fua_set) {
+ outf(f, "fua = %u", seg->writecache_settings.fua);
+ }
+
+ if (seg->writecache_settings.nofua_set) {
+ outf(f, "nofua = %u", seg->writecache_settings.nofua);
+ }
+
+ if (seg->writecache_settings.new_key && seg->writecache_settings.new_val) {
+ outf(f, "writecache_setting_key = \"%s\"",
+ seg->writecache_settings.new_key);
+
+ outf(f, "writecache_setting_val = \"%s\"",
+ seg->writecache_settings.new_val);
+ }
+
+ return 1;
+}
+
+static void _destroy(struct segment_type *segtype)
+{
+ free((void *) segtype);
+}
+
+#ifdef DEVMAPPER_SUPPORT
+
+static int _target_present(struct cmd_context *cmd,
+ const struct lv_segment *seg __attribute__((unused)),
+ unsigned *attributes __attribute__((unused)))
+{
+ static int _writecache_checked = 0;
+ static int _writecache_present = 0;
+
+ if (!activation())
+ return 0;
+
+ if (!_writecache_checked) {
+ _writecache_checked = 1;
+ _writecache_present = target_present(cmd, TARGET_NAME_WRITECACHE, 0);
+ }
+
+ return _writecache_present;
+}
+
+static int _modules_needed(struct dm_pool *mem,
+ const struct lv_segment *seg __attribute__((unused)),
+ struct dm_list *modules)
+{
+ if (!str_list_add(mem, modules, MODULE_NAME_WRITECACHE)) {
+ log_error("String list allocation failed for writecache module.");
+ return 0;
+ }
+
+ return 1;
+}
+#endif /* DEVMAPPER_SUPPORT */
+
+#ifdef DEVMAPPER_SUPPORT
+static int _writecache_add_target_line(struct dev_manager *dm,
+ struct dm_pool *mem,
+ struct cmd_context *cmd __attribute__((unused)),
+ void **target_state __attribute__((unused)),
+ struct lv_segment *seg,
+ const struct lv_activate_opts *laopts __attribute__((unused)),
+ struct dm_tree_node *node, uint64_t len,
+ uint32_t *pvmove_mirror_count __attribute__((unused)))
+{
+ char *origin_uuid;
+ char *fast_uuid;
+ int pmem;
+
+ if (!seg_is_writecache(seg)) {
+ log_error(INTERNAL_ERROR "Passed segment is not writecache.");
+ return 0;
+ }
+
+ if (!seg->writecache) {
+ log_error(INTERNAL_ERROR "Passed segment has no writecache.");
+ return 0;
+ }
+
+ if ((pmem = lv_on_pmem(seg->writecache)) < 0)
+ return_0;
+
+ if (!(origin_uuid = build_dm_uuid(mem, seg_lv(seg, 0), NULL)))
+ return_0;
+
+ if (!(fast_uuid = build_dm_uuid(mem, seg->writecache, NULL)))
+ return_0;
+
+ if (!dm_tree_node_add_writecache_target(node, len,
+ origin_uuid, fast_uuid,
+ pmem,
+ seg->writecache_block_size,
+ &seg->writecache_settings))
+ return_0;
+
+ return 1;
+}
+#endif /* DEVMAPPER_SUPPORT */
+
+static struct segtype_handler _writecache_ops = {
+ .display = _writecache_display,
+ .text_import = _writecache_text_import,
+ .text_import_area_count = _writecache_text_import_area_count,
+ .text_export = _writecache_text_export,
+#ifdef DEVMAPPER_SUPPORT
+ .add_target_line = _writecache_add_target_line,
+ .target_present = _target_present,
+ .modules_needed = _modules_needed,
+#endif
+ .destroy = _destroy,
+};
+
+int init_writecache_segtypes(struct cmd_context *cmd,
+ struct segtype_library *seglib)
+{
+ struct segment_type *segtype = zalloc(sizeof(*segtype));
+
+ if (!segtype) {
+ log_error("Failed to allocate memory for writecache segtype");
+ return 0;
+ }
+
+ segtype->name = SEG_TYPE_NAME_WRITECACHE;
+ segtype->flags = SEG_WRITECACHE;
+ segtype->ops = &_writecache_ops;
+
+ if (!lvm_register_segtype(seglib, segtype))
+ return_0;
+ log_very_verbose("Initialised segtype: %s", segtype->name);
+
+ return 1;
+}
diff --git a/test/shell/writecache.sh b/test/shell/writecache.sh
new file mode 100644
index 0000000..19cc93b
--- /dev/null
+++ b/test/shell/writecache.sh
@@ -0,0 +1,129 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2017 Red Hat, Inc. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v.2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# Test writecache usage
+
+SKIP_WITH_LVMPOLLD=1
+
+. lib/inittest
+
+mount_dir="mnt"
+mkdir -p $mount_dir
+
+# generate random data
+dmesg > pattern1
+ps aux >> pattern1
+
+aux prepare_devs 2 64
+
+vgcreate $SHARED $vg "$dev1"
+
+vgextend $vg "$dev2"
+
+lvcreate -n $lv1 -l 8 -an $vg "$dev1"
+
+lvcreate -n $lv2 -l 4 -an $vg "$dev2"
+
+# test1: create fs on LV before writecache is attached
+
+lvchange -ay $vg/$lv1
+
+mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1"
+
+mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir
+
+cp pattern1 $mount_dir/pattern1
+
+umount $mount_dir
+lvchange -an $vg/$lv1
+
+lvconvert --type writecache --cachepool $lv2 $vg/$lv1
+
+check lv_field $vg/$lv1 segtype writecache
+
+lvs -a $vg/$lv2 --noheadings -o segtype >out
+grep linear out
+
+lvchange -ay $vg/$lv1
+
+mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir
+
+diff pattern1 $mount_dir/pattern1
+
+cp pattern1 $mount_dir/pattern1b
+
+ls -l $mount_dir
+
+umount $mount_dir
+
+lvchange -an $vg/$lv1
+
+lvconvert --splitcache $vg/$lv1
+
+check lv_field $vg/$lv1 segtype linear
+check lv_field $vg/$lv2 segtype linear
+
+lvchange -ay $vg/$lv1
+lvchange -ay $vg/$lv2
+
+mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir
+
+ls -l $mount_dir
+
+diff pattern1 $mount_dir/pattern1
+diff pattern1 $mount_dir/pattern1b
+
+umount $mount_dir
+lvchange -an $vg/$lv1
+lvchange -an $vg/$lv2
+
+# test2: create fs on LV after writecache is attached
+
+lvconvert --type writecache --cachepool $lv2 $vg/$lv1
+
+check lv_field $vg/$lv1 segtype writecache
+
+lvs -a $vg/$lv2 --noheadings -o segtype >out
+grep linear out
+
+lvchange -ay $vg/$lv1
+
+mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1"
+
+mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir
+
+cp pattern1 $mount_dir/pattern1
+ls -l $mount_dir
+
+umount $mount_dir
+lvchange -an $vg/$lv1
+
+lvconvert --splitcache $vg/$lv1
+
+check lv_field $vg/$lv1 segtype linear
+check lv_field $vg/$lv2 segtype linear
+
+lvchange -ay $vg/$lv1
+lvchange -ay $vg/$lv2
+
+mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir
+
+ls -l $mount_dir
+
+diff pattern1 $mount_dir/pattern1
+
+umount $mount_dir
+lvchange -an $vg/$lv1
+lvchange -an $vg/$lv2
+
+vgremove -ff $vg
+
diff --git a/tools/args.h b/tools/args.h
index adca84b..cc65be5 100644
--- a/tools/args.h
+++ b/tools/args.h
@@ -816,6 +816,9 @@ arg(withversions_ARG, '\0', "withversions", 0, 0, 0,
"each configuration node. If the setting is deprecated, also display\n"
"the version since which it is deprecated.\n")
+arg(writecacheblocksize_ARG, '\0', "writecacheblocksize", sizekb_VAL, 0, 0,
+ "The block size to use for cache blocks in writecache.\n")
+
arg(writebehind_ARG, '\0', "writebehind", number_VAL, 0, 0,
"The maximum number of outstanding writes that are allowed to\n"
"devices in a RAID1 LV that is marked write-mostly.\n"
diff --git a/tools/command-lines.in b/tools/command-lines.in
index 840d771..a8e7272 100644
--- a/tools/command-lines.in
+++ b/tools/command-lines.in
@@ -454,7 +454,7 @@ RULE: --poolmetadata not --readahead --stripesize --stripes_long
lvconvert --type cache --cachepool LV LV_linear_striped_raid_thinpool
OO: --cache, OO_LVCONVERT_CACHE, OO_LVCONVERT_POOL, OO_LVCONVERT
ID: lvconvert_to_cache_vol
-DESC: Convert LV to type cache.
+DESC: Attach a cache to an LV, converts the LV to type cache.
RULE: all and lv_is_visible
RULE: --poolmetadata not --readahead --stripesize --stripes_long
@@ -462,13 +462,21 @@ RULE: --poolmetadata not --readahead --stripesize --stripes_long
lvconvert --cache --cachepool LV LV_linear_striped_raid_thinpool
OO: --type cache, OO_LVCONVERT_CACHE, OO_LVCONVERT_POOL, OO_LVCONVERT
ID: lvconvert_to_cache_vol
-DESC: Convert LV to type cache (infers --type cache).
+DESC: Attach a cache to an LV (infers --type cache).
RULE: all and lv_is_visible
RULE: --poolmetadata not --readahead --stripesize --stripes_long
FLAGS: SECONDARY_SYNTAX
---
+lvconvert --type writecache --cachepool LV LV_linear_striped_raid
+OO: OO_LVCONVERT, --cachesettings String, --writecacheblocksize SizeKB
+ID: lvconvert_to_writecache_vol
+DESC: Attach a writecache to an LV, converts the LV to type writecache.
+RULE: all and lv_is_visible
+
+---
+
lvconvert --type thin-pool LV_linear_striped_raid_cache
OO: --stripes_long Number, --stripesize SizeKB,
--discards Discards, OO_LVCONVERT_POOL, OO_LVCONVERT
@@ -573,17 +581,17 @@ FLAGS: SECONDARY_SYNTAX
---
-lvconvert --splitcache LV_cachepool_cache_thinpool
+lvconvert --splitcache LV_cachepool_cache_thinpool_writecache
OO: OO_LVCONVERT
-ID: lvconvert_split_and_keep_cachepool
-DESC: Separate and keep the cache pool from a cache LV.
+ID: lvconvert_split_and_keep_cache
+DESC: Detach a cache from an LV.
---
-lvconvert --uncache LV_cache_thinpool
+lvconvert --uncache LV_cache_thinpool_writecache
OO: OO_LVCONVERT
-ID: lvconvert_split_and_remove_cachepool
-DESC: Separate and delete the cache pool from a cache LV.
+ID: lvconvert_split_and_remove_cache
+DESC: Detach and delete a cache from an LV.
FLAGS: SECONDARY_SYNTAX
---
diff --git a/tools/lv_types.h b/tools/lv_types.h
index 494776b..778cd54 100644
--- a/tools/lv_types.h
+++ b/tools/lv_types.h
@@ -33,5 +33,6 @@ lvt(raid6_LVT, "raid6", NULL)
lvt(raid10_LVT, "raid10", NULL)
lvt(error_LVT, "error", NULL)
lvt(zero_LVT, "zero", NULL)
+lvt(writecache_LVT, "writecache", NULL)
lvt(LVT_COUNT, "", NULL)
diff --git a/tools/lvconvert.c b/tools/lvconvert.c
index 7382ce0..a207bd9 100644
--- a/tools/lvconvert.c
+++ b/tools/lvconvert.c
@@ -16,6 +16,7 @@
#include "lib/lvmpolld/polldaemon.h"
#include "lib/metadata/lv_alloc.h"
+#include "lib/metadata/metadata.h"
#include "lvconvert_poll.h"
#define MAX_PDATA_ARGS 10 /* Max number of accepted args for d-m-p-d tools */
@@ -1840,13 +1841,13 @@ static int _lvconvert_splitsnapshot(struct cmd_context *cmd, struct logical_volu
return 1;
}
-static int _lvconvert_split_and_keep_cachepool(struct cmd_context *cmd,
+static int _lvconvert_split_and_keep_cache(struct cmd_context *cmd,
struct logical_volume *lv,
- struct logical_volume *cachepool_lv)
+ struct logical_volume *lv_fast)
{
struct lv_segment *cache_seg = first_seg(lv);
- log_debug("Detaching cache %s from LV %s.", display_lvname(cachepool_lv), display_lvname(lv));
+ log_debug("Detaching cache %s from LV %s.", display_lvname(lv_fast), display_lvname(lv));
if (!archive(lv->vg))
return_0;
@@ -1865,12 +1866,12 @@ static int _lvconvert_split_and_keep_cachepool(struct cmd_context *cmd,
backup(lv->vg);
log_print_unless_silent("Logical volume %s is not cached and cache pool %s is unused.",
- display_lvname(lv), display_lvname(cachepool_lv));
+ display_lvname(lv), display_lvname(lv_fast));
return 1;
}
-static int _lvconvert_split_and_remove_cachepool(struct cmd_context *cmd,
+static int _lvconvert_split_and_remove_cache(struct cmd_context *cmd,
struct logical_volume *lv,
struct logical_volume *cachepool_lv)
{
@@ -4504,63 +4505,83 @@ int lvconvert_merge_thin_cmd(struct cmd_context *cmd, int argc, char **argv)
NULL, NULL, &_lvconvert_merge_thin_single);
}
-static int _lvconvert_split_cachepool_single(struct cmd_context *cmd,
+static int _lvconvert_detach_writecache(struct cmd_context *cmd,
+ struct logical_volume *lv,
+ struct logical_volume *lv_fast);
+
+static int _lvconvert_split_cache_single(struct cmd_context *cmd,
struct logical_volume *lv,
struct processing_handle *handle)
{
- struct logical_volume *cache_lv = NULL;
- struct logical_volume *cachepool_lv = NULL;
+ struct logical_volume *lv_main = NULL;
+ struct logical_volume *lv_fast = NULL;
struct lv_segment *seg;
int ret;
- if (lv_is_cache(lv)) {
- cache_lv = lv;
- cachepool_lv = first_seg(cache_lv)->pool_lv;
+ if (lv_is_writecache(lv)) {
+ lv_main = lv;
+ lv_fast = first_seg(lv_main)->writecache;
+
+ } else if (lv_is_cache(lv)) {
+ lv_main = lv;
+ lv_fast = first_seg(lv_main)->pool_lv;
} else if (lv_is_cache_pool(lv)) {
- cachepool_lv = lv;
+ lv_fast = lv;
- if ((dm_list_size(&cachepool_lv->segs_using_this_lv) == 1) &&
- (seg = get_only_segment_using_this_lv(cachepool_lv)) &&
+ if ((dm_list_size(&lv_fast->segs_using_this_lv) == 1) &&
+ (seg = get_only_segment_using_this_lv(lv_fast)) &&
seg_is_cache(seg))
- cache_lv = seg->lv;
+ lv_main = seg->lv;
} else if (lv_is_thin_pool(lv)) {
- cache_lv = seg_lv(first_seg(lv), 0); /* cached _tdata */
- cachepool_lv = first_seg(cache_lv)->pool_lv;
+ lv_main = seg_lv(first_seg(lv), 0); /* cached _tdata */
+ lv_fast = first_seg(lv_main)->pool_lv;
}
- if (!cache_lv) {
- log_error("Cannot find cache LV from %s.", display_lvname(lv));
+ if (!lv_main) {
+ log_error("Cannot find LV with cache from %s.", display_lvname(lv));
return ECMD_FAILED;
}
- if (!cachepool_lv) {
- log_error("Cannot find cache pool LV from %s.", display_lvname(lv));
+ if (!lv_fast) {
+ log_error("Cannot find cache %s.", display_lvname(lv));
return ECMD_FAILED;
}
- if ((cmd->command->command_enum == lvconvert_split_and_remove_cachepool_CMD) &&
- lv_is_cache_single(cachepool_lv)) {
- log_error("Detach cache from %s with --splitcache.", display_lvname(lv));
- log_error("The cache %s may then be removed with lvremove.", display_lvname(cachepool_lv));
- return 0;
- }
-
/* If LV is inactive here, ensure it's not active elsewhere. */
- if (!lockd_lv(cmd, cache_lv, "ex", 0))
+ if (!lockd_lv(cmd, lv_main, "ex", 0))
return_0;
- switch (cmd->command->command_enum) {
- case lvconvert_split_and_keep_cachepool_CMD:
- ret = _lvconvert_split_and_keep_cachepool(cmd, cache_lv, cachepool_lv);
- break;
+ if (lv_is_writecache(lv_main)) {
+ if (cmd->command->command_enum == lvconvert_split_and_remove_cache_CMD) {
+ log_error("Detach cache from %s with --splitcache.", display_lvname(lv));
+ log_error("The writecache %s may then be removed with lvremove.", display_lvname(lv_fast));
+ return 0;
+ }
- case lvconvert_split_and_remove_cachepool_CMD:
- ret = _lvconvert_split_and_remove_cachepool(cmd, cache_lv, cachepool_lv);
- break;
- default:
- log_error(INTERNAL_ERROR "Unknown cache pool split.");
+ ret = _lvconvert_detach_writecache(cmd, lv_main, lv_fast);
+
+ } else if (lv_is_cache(lv_main)) {
+ if ((cmd->command->command_enum == lvconvert_split_and_remove_cache_CMD) &&
+ lv_is_cache_single(lv_fast)) {
+ log_error("Detach cache from %s with --splitcache.", display_lvname(lv));
+ log_error("The cache %s may then be removed with lvremove.", display_lvname(lv_fast));
+ return 0;
+ }
+
+ if (cmd->command->command_enum == lvconvert_split_and_remove_cache_CMD)
+ ret = _lvconvert_split_and_remove_cache(cmd, lv_main, lv_fast);
+
+ else if (cmd->command->command_enum == lvconvert_split_and_keep_cache_CMD)
+ ret = _lvconvert_split_and_keep_cache(cmd, lv_main, lv_fast);
+
+ else {
+ log_error(INTERNAL_ERROR "Unknown cache split command.");
+ ret = 0;
+ }
+ } else {
+ log_error(INTERNAL_ERROR "Unknown cache split command.");
ret = 0;
}
@@ -4570,15 +4591,15 @@ static int _lvconvert_split_cachepool_single(struct cmd_context *cmd,
return ECMD_PROCESSED;
}
-int lvconvert_split_cachepool_cmd(struct cmd_context *cmd, int argc, char **argv)
+int lvconvert_split_cache_cmd(struct cmd_context *cmd, int argc, char **argv)
{
- if (cmd->command->command_enum == lvconvert_split_and_remove_cachepool_CMD) {
+ if (cmd->command->command_enum == lvconvert_split_and_remove_cache_CMD) {
cmd->handles_missing_pvs = 1;
cmd->partial_activation = 1;
}
return process_each_lv(cmd, 1, cmd->position_argv, NULL, NULL, READ_FOR_UPDATE,
- NULL, NULL, &_lvconvert_split_cachepool_single);
+ NULL, NULL, &_lvconvert_split_cache_single);
}
static int _lvconvert_raid_types_single(struct cmd_context *cmd, struct logical_volume *lv,
@@ -5041,6 +5062,524 @@ int lvconvert_to_vdopool_param_cmd(struct cmd_context *cmd, int argc, char **arg
NULL, NULL, &_lvconvert_to_vdopool_single);
}
+static int _lv_writecache_detach(struct cmd_context *cmd, struct logical_volume *lv,
+ struct logical_volume *lv_fast)
+{
+ struct lv_segment *seg = first_seg(lv);
+ struct logical_volume *origin;
+
+ if (!seg_is_writecache(seg)) {
+ log_error("LV %s segment is not writecache.", display_lvname(lv));
+ return 0;
+ }
+
+ if (!seg->writecache) {
+ log_error("LV %s writecache segment has no writecache.", display_lvname(lv));
+ return 0;
+ }
+
+ if (!(origin = seg_lv(seg, 0))) {
+ log_error("LV %s writecache segment has no origin", display_lvname(lv));
+ return 0;
+ }
+
+ if (!remove_seg_from_segs_using_this_lv(seg->writecache, seg))
+ return_0;
+
+ lv_set_visible(seg->writecache);
+
+ lv->status &= ~WRITECACHE;
+ seg->writecache = NULL;
+
+ if (!remove_layer_from_lv(lv, origin))
+ return_0;
+
+ if (!lv_remove(origin))
+ return_0;
+
+ return 1;
+}
+
+static int _get_writecache_kernel_error(struct cmd_context *cmd,
+ struct logical_volume *lv,
+ uint32_t *kernel_error)
+{
+ struct lv_with_info_and_seg_status status;
+
+ memset(&status, 0, sizeof(status));
+ status.seg_status.type = SEG_STATUS_NONE;
+
+ status.seg_status.seg = first_seg(lv);
+
+ /* FIXME: why reporter_pool? */
+ if (!(status.seg_status.mem = dm_pool_create("reporter_pool", 1024))) {
+ log_error("Failed to get mem for LV status.");
+ return 0;
+ }
+
+ if (!lv_info_with_seg_status(cmd, first_seg(lv), &status, 1, 1)) {
+ log_error("Failed to get device mapper status for %s", display_lvname(lv));
+ goto fail;
+ }
+
+ if (!status.info.exists) {
+ log_error("No device mapper info exists for %s", display_lvname(lv));
+ goto fail;
+ }
+
+ if (status.seg_status.type != SEG_STATUS_WRITECACHE) {
+ log_error("Invalid device mapper status type (%d) for %s",
+ (uint32_t)status.seg_status.type, display_lvname(lv));
+ goto fail;
+ }
+
+ *kernel_error = status.seg_status.writecache->error;
+
+ dm_pool_destroy(status.seg_status.mem);
+ return 1;
+
+fail:
+ dm_pool_destroy(status.seg_status.mem);
+ return 0;
+}
+
+/*
+ * TODO: add a new option that will skip activating and flushing the
+ * writecache and move directly to detaching.
+ */
+
+static int _lvconvert_detach_writecache(struct cmd_context *cmd,
+ struct logical_volume *lv,
+ struct logical_volume *lv_fast)
+{
+ uint32_t kernel_error = 0;
+
+ /*
+ * LV must be inactive externally before detaching cache.
+ */
+
+ if (lv_info(cmd, lv, 1, NULL, 0, 0)) {
+ log_error("LV %s must be inactive to detach writecache.", display_lvname(lv));
+ return 0;
+ }
+
+ if (!archive(lv->vg))
+ goto_bad;
+
+ /*
+ * Activate LV internally since the LV needs to be active to flush.
+ * LV_TEMPORARY should keep the LV from being exposed to the user
+ * and being accessed.
+ */
+
+ lv->status |= LV_TEMPORARY;
+
+ if (!activate_lv(cmd, lv)) {
+ log_error("Failed to activate LV %s for flushing.", display_lvname(lv));
+ return 0;
+ }
+
+ sync_local_dev_names(cmd);
+
+ if (!lv_writecache_message(lv, "flush")) {
+ log_error("Failed to flush writecache for %s.", display_lvname(lv));
+ deactivate_lv(cmd, lv);
+ return 0;
+ }
+
+ if (!_get_writecache_kernel_error(cmd, lv, &kernel_error)) {
+ log_error("Failed to get writecache error status for %s.", display_lvname(lv));
+ deactivate_lv(cmd, lv);
+ return 0;
+ }
+
+ if (kernel_error) {
+ log_error("Failed to flush writecache (error %u) for %s.", kernel_error, display_lvname(lv));
+ deactivate_lv(cmd, lv);
+ return 0;
+ }
+
+ if (!deactivate_lv(cmd, lv)) {
+ log_error("Failed to deactivate LV %s for detaching writecache.", display_lvname(lv));
+ return 0;
+ }
+
+ lv->status &= ~LV_TEMPORARY;
+
+ if (!_lv_writecache_detach(cmd, lv, lv_fast)) {
+ log_error("Failed to detach writecache from %s", display_lvname(lv));
+ return 0;
+ }
+
+ if (!vg_write(lv->vg) || !vg_commit(lv->vg))
+ return_0;
+
+ backup(lv->vg);
+
+ log_print_unless_silent("Logical volume %s write cache has been detached.",
+ display_lvname(lv));
+ return ECMD_PROCESSED;
+bad:
+	return 0;
+
+}
+
+static int _writecache_zero(struct cmd_context *cmd, struct logical_volume *lv)
+{
+ struct device *dev;
+ char name[PATH_MAX];
+ int ret = 0;
+
+ if (!activate_lv(cmd, lv)) {
+ log_error("Failed to activate LV %s for zeroing.", lv->name);
+ return 0;
+ }
+
+ sync_local_dev_names(cmd);
+
+ if (dm_snprintf(name, sizeof(name), "%s%s/%s",
+ cmd->dev_dir, lv->vg->name, lv->name) < 0) {
+ log_error("Name too long - device not cleared (%s)", lv->name);
+ goto out;
+ }
+
+ if (!(dev = dev_cache_get(cmd, name, NULL))) {
+ log_error("%s: not found: device not zeroed", name);
+ goto out;
+ }
+
+ if (!label_scan_open(dev)) {
+ log_error("Failed to open %s/%s for zeroing.", lv->vg->name, lv->name);
+ goto out;
+ }
+
+ if (!dev_write_zeros(dev, UINT64_C(0), (size_t) 1 << SECTOR_SHIFT))
+ goto_out;
+
+ log_debug("Zeroed the first sector of %s", lv->name);
+
+ label_scan_invalidate(dev);
+
+ ret = 1;
+out:
+ if (!deactivate_lv(cmd, lv)) {
+ log_error("Failed to deactivate LV %s for zeroing.", lv->name);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int _get_one_writecache_setting(struct cmd_context *cmd, struct writecache_settings *settings,
+ char *key, char *val)
+{
+ if (!strncmp(key, "high_watermark", strlen("high_watermark"))) {
+ if (sscanf(val, "%llu", (unsigned long long *)&settings->high_watermark) != 1)
+ goto_bad;
+ settings->high_watermark_set = 1;
+ return 1;
+ }
+
+ if (!strncmp(key, "low_watermark", strlen("low_watermark"))) {
+ if (sscanf(val, "%llu", (unsigned long long *)&settings->low_watermark) != 1)
+ goto_bad;
+ settings->low_watermark_set = 1;
+ return 1;
+ }
+
+ if (!strncmp(key, "writeback_jobs", strlen("writeback_jobs"))) {
+ if (sscanf(val, "%llu", (unsigned long long *)&settings->writeback_jobs) != 1)
+ goto_bad;
+ settings->writeback_jobs_set = 1;
+ return 1;
+ }
+
+ if (!strncmp(key, "autocommit_blocks", strlen("autocommit_blocks"))) {
+ if (sscanf(val, "%llu", (unsigned long long *)&settings->autocommit_blocks) != 1)
+ goto_bad;
+ settings->autocommit_blocks_set = 1;
+ return 1;
+ }
+
+ if (!strncmp(key, "autocommit_time", strlen("autocommit_time"))) {
+ if (sscanf(val, "%llu", (unsigned long long *)&settings->autocommit_time) != 1)
+ goto_bad;
+ settings->autocommit_time_set = 1;
+ return 1;
+ }
+
+ if (!strncmp(key, "fua", strlen("fua"))) {
+ if (settings->nofua_set) {
+ log_error("Setting fua and nofua cannot both be set.");
+ return_0;
+ }
+ if (sscanf(val, "%u", &settings->fua) != 1)
+ goto_bad;
+ settings->fua_set = 1;
+ return 1;
+ }
+
+ if (!strncmp(key, "nofua", strlen("nofua"))) {
+		if (settings->fua_set) {
+ log_error("Setting fua and nofua cannot both be set.");
+ return_0;
+ }
+ if (sscanf(val, "%u", &settings->nofua) != 1)
+ goto_bad;
+ settings->nofua_set = 1;
+ return 1;
+ }
+
+ if (settings->new_key) {
+ log_error("Setting %s is not recognized. Only one unrecognized setting is allowed.", key);
+ return 0;
+ }
+
+ log_warn("Unrecognized writecache setting \"%s\" may cause activation failure.", key);
+ if (yes_no_prompt("Use unrecognized writecache setting? [y/n]: ") == 'n') {
+ log_error("Aborting writecache conversion.");
+ return_0;
+ }
+
+ log_warn("Using unrecognized writecache setting: %s = %s.", key, val);
+
+ settings->new_key = dm_pool_strdup(cmd->mem, key);
+ settings->new_val = dm_pool_strdup(cmd->mem, val);
+ return 1;
+
+ bad:
+ log_error("Invalid setting: %s", key);
+ return 0;
+}
+
+static int _get_writecache_settings(struct cmd_context *cmd, struct writecache_settings *settings)
+{
+ struct arg_value_group_list *group;
+ const char *str;
+ char key[64];
+ char val[64];
+ int num;
+ int pos;
+
+ /*
+ * "grouped" means that multiple --cachesettings options can be used.
+ * Each option is also allowed to contain multiple key = val pairs.
+ */
+
+ dm_list_iterate_items(group, &cmd->arg_value_groups) {
+ if (!grouped_arg_is_set(group->arg_values, cachesettings_ARG))
+ continue;
+
+ if (!(str = grouped_arg_str_value(group->arg_values, cachesettings_ARG, NULL)))
+ break;
+
+ pos = 0;
+
+ while (pos < strlen(str)) {
+ /* scan for "key1=val1 key2 = val2 key3= val3" */
+
+ memset(key, 0, sizeof(key));
+ memset(val, 0, sizeof(val));
+
+ if (sscanf(str + pos, " %63[^=]=%63s %n", key, val, &num) != 2) {
+ log_error("Invalid setting at: %s", str+pos);
+ return_0;
+ }
+
+ pos += num;
+
+ if (!_get_one_writecache_setting(cmd, settings, key, val))
+ return_0;
+ }
+ }
+
+ return 1;
+}
+
+static struct logical_volume *_lv_writecache_create(struct cmd_context *cmd,
+ struct logical_volume *lv,
+ struct logical_volume *lv_fast,
+ uint32_t block_size_sectors,
+ struct writecache_settings *settings)
+{
+ struct logical_volume *lv_wcorig;
+ const struct segment_type *segtype;
+ struct lv_segment *seg;
+
+ /* should lv_fast get a new status flag indicating it's the cache in a writecache LV? */
+
+ if (!(segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_WRITECACHE)))
+ return_NULL;
+
+ /*
+ * "lv_wcorig" is a new LV with new id, but with the segments from "lv".
+ * "lv" keeps the existing name and id, but gets a new writecache segment,
+ * in place of the segments that were moved to lv_wcorig.
+ */
+
+ if (!(lv_wcorig = insert_layer_for_lv(cmd, lv, WRITECACHE, "_wcorig")))
+ return_NULL;
+
+ lv_set_hidden(lv_fast);
+
+ seg = first_seg(lv);
+ seg->segtype = segtype;
+
+ seg->writecache = lv_fast;
+
+ /* writecache_block_size is in bytes */
+ seg->writecache_block_size = block_size_sectors * 512;
+
+ memcpy(&seg->writecache_settings, settings, sizeof(struct writecache_settings));
+
+ add_seg_to_segs_using_this_lv(lv_fast, seg);
+
+ return lv_wcorig;
+}
+
+static int _lvconvert_writecache_attach_single(struct cmd_context *cmd,
+ struct logical_volume *lv,
+ struct processing_handle *handle)
+{
+ struct volume_group *vg = lv->vg;
+ struct logical_volume *lv_wcorig;
+ struct logical_volume *lv_fast;
+ struct writecache_settings settings;
+ const char *fast_name;
+ uint32_t block_size_sectors;
+ char *lockd_fast_args = NULL;
+ char *lockd_fast_name = NULL;
+ struct id lockd_fast_id;
+
+ fast_name = arg_str_value(cmd, cachepool_ARG, "");
+
+ if (!(lv_fast = find_lv(vg, fast_name))) {
+ log_error("LV %s not found.", fast_name);
+ goto bad;
+ }
+
+ if (!seg_is_linear(first_seg(lv_fast))) {
+ log_error("LV %s must be linear to use as a writecache.", display_lvname(lv_fast));
+ return 0;
+ }
+
+ /* fast LV shouldn't generally be active by itself, but just in case. */
+ if (lv_info(cmd, lv_fast, 1, NULL, 0, 0)) {
+ log_error("LV %s must be inactive to attach.", display_lvname(lv_fast));
+ return 0;
+ }
+
+ /* default block size is 4096 bytes (8 sectors) */
+ block_size_sectors = arg_int_value(cmd, writecacheblocksize_ARG, 8);
+ if (block_size_sectors > 8) {
+ log_error("Max writecache block size is 4096 bytes.");
+ return 0;
+ }
+
+ memset(&settings, 0, sizeof(settings));
+
+ if (!_get_writecache_settings(cmd, &settings)) {
+ log_error("Invalid writecache settings.");
+ return 0;
+ }
+
+ /* Ensure the two LVs are not active elsewhere. */
+ if (!lockd_lv(cmd, lv, "ex", 0))
+ goto_bad;
+ if (!lockd_lv(cmd, lv_fast, "ex", 0))
+ goto_bad;
+
+ if (!archive(vg))
+ goto_bad;
+
+ /*
+ * TODO: use libblkid to get the sector size of lv. If it doesn't
+ * match the block_size we are using for the writecache, then warn that
+ * an existing file system on lv may become unmountable with the
+ * writecache attached because of the changing sector size. If this
+ * happens, then use --splitcache, and reattach the writecache using a
+ * --writecacheblocksize value matching the sector size of lv.
+ */
+
+ if (!_writecache_zero(cmd, lv_fast)) {
+ log_error("LV %s could not be zeroed.", display_lvname(lv_fast));
+ return 0;
+ }
+
+ /*
+ * Changes the vg struct to match the desired state.
+ *
+ * - lv keeps existing lv name and id, gets new segment with segtype
+ * "writecache".
+ *
+ * - lv_fast keeps its existing name and id, becomes hidden.
+ *
+ * - lv_wcorig gets new name (existing name + _wcorig suffix),
+ * gets new id, becomes hidden, gets segments from lv.
+ */
+
+ if (!(lv_wcorig = _lv_writecache_create(cmd, lv, lv_fast, block_size_sectors, &settings)))
+ goto_bad;
+
+ /*
+ * lv keeps the same lockd lock it had before, the lock for
+ * lv_fast is freed, and lv_wcorig gets no lock.
+ */
+ if (vg_is_shared(vg) && lv_fast->lock_args) {
+ lockd_fast_args = dm_pool_strdup(cmd->mem, lv_fast->lock_args);
+ lockd_fast_name = dm_pool_strdup(cmd->mem, lv_fast->name);
+ memcpy(&lockd_fast_id, &lv_fast->lvid.id[1], sizeof(struct id));
+ lv_fast->lock_args = NULL;
+ }
+
+ /*
+ * vg_write(), suspend_lv(), vg_commit(), resume_lv(),
+ * where the old LV is suspended and the new LV is resumed.
+ */
+
+ if (!lv_update_and_reload(lv))
+ goto_bad;
+
+ lockd_lv(cmd, lv, "un", 0);
+
+ if (lockd_fast_name) {
+ /* unlock and free lockd lock for lv_fast */
+ if (!lockd_lv_name(cmd, vg, lockd_fast_name, &lockd_fast_id, lockd_fast_args, "un", 0))
+ log_error("Failed to unlock fast LV %s/%s", vg->name, lockd_fast_name);
+ lockd_free_lv(cmd, vg, lockd_fast_name, &lockd_fast_id, lockd_fast_args);
+ }
+
+ log_print_unless_silent("Logical volume %s now has write cache.",
+ display_lvname(lv));
+ return ECMD_PROCESSED;
+bad:
+ return ECMD_FAILED;
+
+}
+
+int lvconvert_to_writecache_vol_cmd(struct cmd_context *cmd, int argc, char **argv)
+{
+ struct processing_handle *handle;
+ struct lvconvert_result lr = { 0 };
+ int ret;
+
+ if (!(handle = init_processing_handle(cmd, NULL))) {
+ log_error("Failed to initialize processing handle.");
+ return ECMD_FAILED;
+ }
+
+ handle->custom_handle = &lr;
+
+ cmd->cname->flags &= ~GET_VGNAME_FROM_OPTIONS;
+
+ ret = process_each_lv(cmd, cmd->position_argc, cmd->position_argv, NULL, NULL, READ_FOR_UPDATE, handle, NULL,
+ &_lvconvert_writecache_attach_single);
+
+ destroy_processing_handle(cmd, handle);
+
+ return ret;
+}
+
/*
* All lvconvert command defs have their own function,
* so the generic function name is unused.
diff --git a/tools/lvmcmdline.c b/tools/lvmcmdline.c
index b1d0723..f49843d 100644
--- a/tools/lvmcmdline.c
+++ b/tools/lvmcmdline.c
@@ -124,12 +124,13 @@ static const struct command_function _command_functions[CMD_COUNT] = {
{ lvconvert_to_cachepool_CMD, lvconvert_to_pool_cmd },
{ lvconvert_to_thin_with_external_CMD, lvconvert_to_thin_with_external_cmd },
{ lvconvert_to_cache_vol_CMD, lvconvert_to_cache_vol_cmd },
+ { lvconvert_to_writecache_vol_CMD, lvconvert_to_writecache_vol_cmd },
{ lvconvert_swap_pool_metadata_CMD, lvconvert_swap_pool_metadata_cmd },
{ lvconvert_to_thinpool_or_swap_metadata_CMD, lvconvert_to_pool_or_swap_metadata_cmd },
{ lvconvert_to_cachepool_or_swap_metadata_CMD, lvconvert_to_pool_or_swap_metadata_cmd },
{ lvconvert_merge_thin_CMD, lvconvert_merge_thin_cmd },
- { lvconvert_split_and_keep_cachepool_CMD, lvconvert_split_cachepool_cmd },
- { lvconvert_split_and_remove_cachepool_CMD, lvconvert_split_cachepool_cmd },
+ { lvconvert_split_and_keep_cache_CMD, lvconvert_split_cache_cmd },
+ { lvconvert_split_and_remove_cache_CMD, lvconvert_split_cache_cmd },
/* lvconvert raid-related type conversions */
{ lvconvert_raid_types_CMD, lvconvert_raid_types_cmd },
@@ -2120,7 +2121,7 @@ static int _process_command_line(struct cmd_context *cmd, int *argc, char ***arg
* value (e.g. foo_ARG) from the args array.
*/
if ((arg_enum = _find_arg(cmd->name, goval)) < 0) {
- log_fatal("Unrecognised option.");
+ log_fatal("Unrecognised option %d (%c).", goval, goval);
return 0;
}
diff --git a/tools/toollib.c b/tools/toollib.c
index c2ffa42..2ab4b62 100644
--- a/tools/toollib.c
+++ b/tools/toollib.c
@@ -2562,6 +2562,8 @@ static int _lv_is_type(struct cmd_context *cmd, struct logical_volume *lv, int l
return seg_is_any_raid6(seg);
case raid10_LVT:
return seg_is_raid10(seg);
+ case writecache_LVT:
+ return seg_is_writecache(seg);
case error_LVT:
return !strcmp(seg->segtype->name, SEG_TYPE_NAME_ERROR);
case zero_LVT:
@@ -2618,6 +2620,8 @@ int get_lvt_enum(struct logical_volume *lv)
return raid6_LVT;
if (seg_is_raid10(seg))
return raid10_LVT;
+ if (seg_is_writecache(seg))
+ return writecache_LVT;
if (!strcmp(seg->segtype->name, SEG_TYPE_NAME_ERROR))
return error_LVT;
@@ -2740,8 +2744,13 @@ static int _check_lv_types(struct cmd_context *cmd, struct logical_volume *lv, i
if (!ret) {
int lvt_enum = get_lvt_enum(lv);
struct lv_type *type = get_lv_type(lvt_enum);
- log_warn("Command on LV %s does not accept LV type %s.",
- display_lvname(lv), type ? type->name : "unknown");
+ if (!type) {
+ log_warn("Command on LV %s does not accept LV type unknown (%d).",
+ display_lvname(lv), lvt_enum);
+ } else {
+ log_warn("Command on LV %s does not accept LV type %s.",
+ display_lvname(lv), type->name);
+ }
}
return ret;
diff --git a/tools/tools.h b/tools/tools.h
index 55d486b..5e0cd30 100644
--- a/tools/tools.h
+++ b/tools/tools.h
@@ -248,11 +248,12 @@ int lvconvert_start_poll_cmd(struct cmd_context *cmd, int argc, char **argv);
int lvconvert_to_pool_cmd(struct cmd_context *cmd, int argc, char **argv);
int lvconvert_to_cache_vol_cmd(struct cmd_context *cmd, int argc, char **argv);
+int lvconvert_to_writecache_vol_cmd(struct cmd_context *cmd, int argc, char **argv);
int lvconvert_to_thin_with_external_cmd(struct cmd_context *cmd, int argc, char **argv);
int lvconvert_swap_pool_metadata_cmd(struct cmd_context *cmd, int argc, char **argv);
int lvconvert_to_pool_or_swap_metadata_cmd(struct cmd_context *cmd, int argc, char **argv);
int lvconvert_merge_thin_cmd(struct cmd_context *cmd, int argc, char **argv);
-int lvconvert_split_cachepool_cmd(struct cmd_context *cmd, int argc, char **argv);
+int lvconvert_split_cache_cmd(struct cmd_context *cmd, int argc, char **argv);
int lvconvert_raid_types_cmd(struct cmd_context * cmd, int argc, char **argv);
int lvconvert_split_mirror_images_cmd(struct cmd_context * cmd, int argc, char **argv);
5 years, 5 months
master - Allow dm-cache cache device to be standard LV
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=cac4a9743acb826d785...
Commit: cac4a9743acb826d785c0e51e9a752d8959ced80
Parent: 8c9d9a744605e37799a2475932ae7dfd43831d08
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Fri Aug 17 15:45:52 2018 -0500
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Tue Nov 6 13:44:54 2018 -0600
Allow dm-cache cache device to be standard LV
If a single, standard LV is specified as the cache, use
it directly instead of converting it into a cache-pool
object with two separate LVs (for data and metadata).
With a single LV as the cache, lvm will use blocks at the
beginning for metadata, and the rest for data. Separate
dm linear devices are set up to point at the metadata and
data areas of the LV. These dm devs are given to the
dm-cache target to use.
The single LV cache cannot be resized without recreating it.
If the --poolmetadata option is used to specify an LV for
metadata, then a cache pool will be created (with separate
LVs for data and metadata.)
Usage:
$ lvcreate -n main -L 128M vg /dev/loop0
$ lvcreate -n fast -L 64M vg /dev/loop1
$ lvs -a vg
LV VG Attr LSize Type Devices
main vg -wi-a----- 128.00m linear /dev/loop0(0)
fast vg -wi-a----- 64.00m linear /dev/loop1(0)
$ lvconvert --type cache --cachepool fast vg/main
$ lvs -a vg
LV VG Attr LSize Origin Pool Type Devices
[fast] vg Cwi---C--- 64.00m linear /dev/loop1(0)
main vg Cwi---C--- 128.00m [main_corig] [fast] cache main_corig(0)
[main_corig] vg owi---C--- 128.00m linear /dev/loop0(0)
$ lvchange -ay vg/main
$ dmsetup ls
vg-fast_cdata (253:4)
vg-fast_cmeta (253:5)
vg-main_corig (253:6)
vg-main (253:24)
vg-fast (253:3)
$ dmsetup table
vg-fast_cdata: 0 98304 linear 253:3 32768
vg-fast_cmeta: 0 32768 linear 253:3 0
vg-main_corig: 0 262144 linear 7:0 2048
vg-main: 0 262144 cache 253:5 253:4 253:6 128 2 metadata2 writethrough mq 0
vg-fast: 0 131072 linear 7:1 2048
$ lvchange -an vg/min
$ lvconvert --splitcache vg/main
$ lvs -a vg
LV VG Attr LSize Type Devices
fast vg -wi------- 64.00m linear /dev/loop1(0)
main vg -wi------- 128.00m linear /dev/loop0(0)
---
device_mapper/all.h | 4 +
device_mapper/libdm-deptree.c | 15 +-
lib/activate/dev_manager.c | 269 ++++++++++++++++++++-
lib/activate/dev_manager.h | 10 +
lib/cache_segtype/cache.c | 125 +++++++++-
lib/format_text/flags.c | 1 +
lib/locking/lvmlockd.c | 3 +
lib/metadata/cache_manip.c | 478 +++++++++++++++++++++++++++++++++---
lib/metadata/lv.c | 18 +-
lib/metadata/lv_manip.c | 10 +-
lib/metadata/merge.c | 43 +++-
lib/metadata/metadata-exported.h | 24 ++-
lib/report/report.c | 20 ++-
test/shell/cache-single-options.sh | 269 ++++++++++++++++++++
test/shell/cache-single-thin.sh | 43 ++++
test/shell/cache-single-types.sh | 88 +++++++
test/shell/cache-single-usage.sh | 129 ++++++++++
test/shell/lvconvert-cache-raid.sh | 41 ++--
test/shell/lvconvert-cache.sh | 1 +
test/shell/lvrename-cache-thin.sh | 2 +
tools/lvchange.c | 5 +-
tools/lvconvert.c | 174 ++++++++++++--
tools/vgsplit.c | 5 +-
23 files changed, 1666 insertions(+), 111 deletions(-)
diff --git a/device_mapper/all.h b/device_mapper/all.h
index e56bae9..0f01075 100644
--- a/device_mapper/all.h
+++ b/device_mapper/all.h
@@ -912,6 +912,10 @@ int dm_tree_node_add_cache_target(struct dm_tree_node *node,
const char *origin_uuid,
const char *policy_name,
const struct dm_config_node *policy_settings,
+ uint64_t metadata_start,
+ uint64_t metadata_len,
+ uint64_t data_start,
+ uint64_t data_len,
uint32_t data_block_size);
/*
diff --git a/device_mapper/libdm-deptree.c b/device_mapper/libdm-deptree.c
index 5d03545..13239c7 100644
--- a/device_mapper/libdm-deptree.c
+++ b/device_mapper/libdm-deptree.c
@@ -189,6 +189,11 @@ struct load_segment {
uint32_t min_recovery_rate; /* raid kB/sec/disk */
uint32_t data_copies; /* raid10 data_copies */
+ uint64_t metadata_start; /* Cache */
+ uint64_t metadata_len; /* Cache */
+ uint64_t data_start; /* Cache */
+ uint64_t data_len; /* Cache */
+
struct dm_tree_node *metadata; /* Thin_pool + Cache */
struct dm_tree_node *pool; /* Thin_pool, Thin */
struct dm_tree_node *external; /* Thin */
@@ -3473,6 +3478,10 @@ int dm_tree_node_add_cache_target(struct dm_tree_node *node,
const char *origin_uuid,
const char *policy_name,
const struct dm_config_node *policy_settings,
+ uint64_t metadata_start,
+ uint64_t metadata_len,
+ uint64_t data_start,
+ uint64_t data_len,
uint32_t data_block_size)
{
struct dm_config_node *cn;
@@ -3548,6 +3557,10 @@ int dm_tree_node_add_cache_target(struct dm_tree_node *node,
if (!_link_tree_nodes(node, seg->origin))
return_0;
+ seg->metadata_start = metadata_start;
+ seg->metadata_len = metadata_len;
+ seg->data_start = data_start;
+ seg->data_len = data_len;
seg->data_block_size = data_block_size;
seg->flags = feature_flags;
seg->policy_name = policy_name;
@@ -4026,7 +4039,7 @@ int dm_tree_node_add_cache_target_base(struct dm_tree_node *node,
return dm_tree_node_add_cache_target(node, size, feature_flags & _mask,
metadata_uuid, data_uuid, origin_uuid,
- policy_name, policy_settings, data_block_size);
+ policy_name, policy_settings, 0, 0, 0, 0, data_block_size);
}
#endif
diff --git a/lib/activate/dev_manager.c b/lib/activate/dev_manager.c
index 15dec6f..1249581 100644
--- a/lib/activate/dev_manager.c
+++ b/lib/activate/dev_manager.c
@@ -827,6 +827,113 @@ static int _info(struct cmd_context *cmd,
return 1;
}
+/* FIXME: could we just use dev_manager_info instead of this? */
+
+int get_cache_single_meta_data(struct cmd_context *cmd,
+ struct logical_volume *lv,
+ struct logical_volume *pool_lv,
+ struct dm_info *info_meta, struct dm_info *info_data)
+{
+ struct lv_segment *lvseg = first_seg(lv);
+ union lvid lvid_meta;
+ union lvid lvid_data;
+ char *name_meta;
+ char *name_data;
+ char *dlid_meta;
+ char *dlid_data;
+
+ memset(&lvid_meta, 0, sizeof(lvid_meta));
+ memset(&lvid_data, 0, sizeof(lvid_meta));
+ memcpy(&lvid_meta.id[0], &lv->vg->id, sizeof(struct id));
+ memcpy(&lvid_meta.id[1], &lvseg->metadata_id, sizeof(struct id));
+ memcpy(&lvid_data.id[0], &lv->vg->id, sizeof(struct id));
+ memcpy(&lvid_data.id[1], &lvseg->data_id, sizeof(struct id));
+
+ if (!(dlid_meta = dm_build_dm_uuid(cmd->mem, UUID_PREFIX, (const char *)&lvid_meta.s, NULL)))
+ return_0;
+ if (!(dlid_data = dm_build_dm_uuid(cmd->mem, UUID_PREFIX, (const char *)&lvid_data.s, NULL)))
+ return_0;
+ if (!(name_meta = dm_build_dm_name(cmd->mem, lv->vg->name, pool_lv->name, "_cmeta")))
+ return_0;
+ if (!(name_data = dm_build_dm_name(cmd->mem, lv->vg->name, pool_lv->name, "_cdata")))
+ return_0;
+
+ if (!_info(cmd, name_meta, dlid_meta, 1, 0, info_meta, NULL, NULL))
+ return_0;
+
+ if (!_info(cmd, name_data, dlid_data, 1, 0, info_data, NULL, NULL))
+ return_0;
+
+ return 1;
+}
+
+/*
+ * FIXME: isn't there a simpler, more direct way to just remove these two dm
+ * devs?
+ */
+
+int remove_cache_single_meta_data(struct cmd_context *cmd,
+ struct dm_info *info_meta, struct dm_info *info_data)
+{
+ struct dm_tree *dtree;
+ struct dm_tree_node *root;
+ struct dm_tree_node *child;
+ const char *uuid;
+ void *handle = NULL;
+
+ if (!(dtree = dm_tree_create()))
+ goto_out;
+
+ if (!dm_tree_add_dev(dtree, info_meta->major, info_meta->minor))
+ goto_out;
+
+ if (!dm_tree_add_dev(dtree, info_data->major, info_data->minor))
+ goto_out;
+
+ if (!(root = dm_tree_find_node(dtree, 0, 0)))
+ goto_out;
+
+ while ((child = dm_tree_next_child(&handle, root, 0))) {
+ if (!(uuid = dm_tree_node_get_uuid(child))) {
+ stack;
+ continue;
+ }
+
+ if (!dm_tree_deactivate_children(root, uuid, strlen(uuid))) {
+ stack;
+ continue;
+ }
+ }
+
+ dm_tree_free(dtree);
+ return 1;
+ out:
+ dm_tree_free(dtree);
+ return 0;
+}
+
+int dev_manager_remove_dm_major_minor(uint32_t major, uint32_t minor)
+{
+ struct dm_task *dmt;
+ int r = 0;
+
+ log_verbose("Removing dm dev %u:%u", major, minor);
+
+ if (!(dmt = dm_task_create(DM_DEVICE_REMOVE)))
+ return_0;
+
+ if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
+ log_error("Failed to set device number for remove %u:%u", major, minor);
+ goto out;
+ }
+
+ r = dm_task_run(dmt);
+out:
+ dm_task_destroy(dmt);
+
+ return r;
+}
+
static int _info_by_dev(uint32_t major, uint32_t minor, struct dm_info *info)
{
return _info_run(NULL, info, NULL, 0, 0, 0, major, minor);
@@ -2236,6 +2343,10 @@ static int _pool_register_callback(struct dev_manager *dm,
return 1;
#endif
+ /* Skip for single-device cache pool */
+ if (lv_is_cache(lv) && lv_is_cache_single(first_seg(lv)->pool_lv))
+ return 1;
+
if (!(data = dm_pool_zalloc(dm->mem, sizeof(*data)))) {
log_error("Failed to allocated path for callback.");
return 0;
@@ -2303,6 +2414,53 @@ static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
/* Unused cache pool is activated as metadata */
}
+ if (lv_is_cache(lv) && lv_is_cache_single(first_seg(lv)->pool_lv) && dm->activation) {
+ struct logical_volume *pool_lv = first_seg(lv)->pool_lv;
+ struct lv_segment *lvseg = first_seg(lv);
+ struct dm_info info_meta;
+ struct dm_info info_data;
+ union lvid lvid_meta;
+ union lvid lvid_data;
+ char *name_meta;
+ char *name_data;
+ char *dlid_meta;
+ char *dlid_data;
+
+ memset(&lvid_meta, 0, sizeof(lvid_meta));
+ memset(&lvid_data, 0, sizeof(lvid_meta));
+ memcpy(&lvid_meta.id[0], &lv->vg->id, sizeof(struct id));
+ memcpy(&lvid_meta.id[1], &lvseg->metadata_id, sizeof(struct id));
+ memcpy(&lvid_data.id[0], &lv->vg->id, sizeof(struct id));
+ memcpy(&lvid_data.id[1], &lvseg->data_id, sizeof(struct id));
+
+ if (!(dlid_meta = dm_build_dm_uuid(dm->mem, UUID_PREFIX, (const char *)&lvid_meta.s, NULL)))
+ return_0;
+ if (!(dlid_data = dm_build_dm_uuid(dm->mem, UUID_PREFIX, (const char *)&lvid_data.s, NULL)))
+ return_0;
+ if (!(name_meta = dm_build_dm_name(dm->mem, lv->vg->name, pool_lv->name, "_cmeta")))
+ return_0;
+ if (!(name_data = dm_build_dm_name(dm->mem, lv->vg->name, pool_lv->name, "_cdata")))
+ return_0;
+
+ if (!_info(dm->cmd, name_meta, dlid_meta, 1, 0, &info_meta, NULL, NULL))
+ return_0;
+
+ if (!_info(dm->cmd, name_data, dlid_data, 1, 0, &info_data, NULL, NULL))
+ return_0;
+
+ if (info_meta.exists &&
+ !dm_tree_add_dev_with_udev_flags(dtree, info_meta.major, info_meta.minor,
+ _get_udev_flags(dm, lv, NULL, 0, 0, 0))) {
+ log_error("Failed to add device (%" PRIu32 ":%" PRIu32") to dtree.", info_meta.major, info_meta.minor);
+ }
+
+ if (info_data.exists &&
+ !dm_tree_add_dev_with_udev_flags(dtree, info_data.major, info_data.minor,
+ _get_udev_flags(dm, lv, NULL, 0, 0, 0))) {
+ log_error("Failed to add device (%" PRIu32 ":%" PRIu32") to dtree.", info_data.major, info_data.minor);
+ }
+ }
+
if (!origin_only && !_add_dev_to_dtree(dm, dtree, lv, NULL))
return_0;
@@ -2444,7 +2602,7 @@ static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
!_add_lv_to_dtree(dm, dtree, seg->metadata_lv, 0))
return_0;
if (seg->pool_lv &&
- (lv_is_cache_pool(seg->pool_lv) || dm->track_external_lv_deps) &&
+ (lv_is_cache_pool(seg->pool_lv) || lv_is_cache_single(seg->pool_lv) || dm->track_external_lv_deps) &&
/* When activating and not origin_only detect linear 'overlay' over pool */
!_add_lv_to_dtree(dm, dtree, seg->pool_lv, dm->activation ? origin_only : 1))
return_0;
@@ -2941,6 +3099,14 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
int save_pending_delete = dm->track_pending_delete;
int merge_in_progress = 0;
+ if (!(lvlayer = dm_pool_alloc(dm->mem, sizeof(*lvlayer)))) {
+ log_error("_add_new_lv_to_dtree: pool alloc failed for %s %s.",
+ display_lvname(lv), layer);
+ return 0;
+ }
+ lvlayer->lv = lv;
+ lvlayer->visible_component = (laopts->component_lv == lv) ? 1 : 0;
+
log_debug_activation("Adding new LV %s%s%s to dtree", display_lvname(lv),
layer ? "-" : "", layer ? : "");
/* LV with pending delete is never put new into a table */
@@ -2957,6 +3123,99 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
return 1;
}
+ if (lv_is_cache(lv) && lv_is_cache_single(first_seg(lv)->pool_lv)) {
+ struct logical_volume *pool_lv = first_seg(lv)->pool_lv;
+ struct lv_segment *lvseg = first_seg(lv);
+ struct volume_group *vg = lv->vg;
+ struct dm_tree_node *dnode_meta;
+ struct dm_tree_node *dnode_data;
+ union lvid lvid_meta;
+ union lvid lvid_data;
+ char *name_meta;
+ char *name_data;
+ char *dlid_meta;
+ char *dlid_data;
+ char *dlid_pool;
+ uint64_t meta_len = first_seg(lv)->metadata_len;
+ uint64_t data_len = first_seg(lv)->data_len;
+ uint16_t udev_flags = _get_udev_flags(dm, lv, layer,
+ laopts->noscan, laopts->temporary,
+ 0);
+
+ log_debug("Add cache pool %s to dtree before cache %s", pool_lv->name, lv->name);
+
+ if (!_add_new_lv_to_dtree(dm, dtree, pool_lv, laopts, NULL)) {
+ log_error("Failed to add cachepool to dtree before cache");
+ return_0;
+ }
+
+ memset(&lvid_meta, 0, sizeof(lvid_meta));
+ memset(&lvid_data, 0, sizeof(lvid_meta));
+ memcpy(&lvid_meta.id[0], &vg->id, sizeof(struct id));
+ memcpy(&lvid_meta.id[1], &lvseg->metadata_id, sizeof(struct id));
+ memcpy(&lvid_data.id[0], &vg->id, sizeof(struct id));
+ memcpy(&lvid_data.id[1], &lvseg->data_id, sizeof(struct id));
+
+ if (!(dlid_meta = dm_build_dm_uuid(dm->mem, UUID_PREFIX, (const char *)&lvid_meta.s, NULL)))
+ return_0;
+ if (!(dlid_data = dm_build_dm_uuid(dm->mem, UUID_PREFIX, (const char *)&lvid_data.s, NULL)))
+ return_0;
+
+ if (!(name_meta = dm_build_dm_name(dm->mem, vg->name, pool_lv->name, "_cmeta")))
+ return_0;
+ if (!(name_data = dm_build_dm_name(dm->mem, vg->name, pool_lv->name, "_cdata")))
+ return_0;
+
+ if (!(dlid_pool = build_dm_uuid(dm->mem, pool_lv, NULL)))
+ return_0;
+
+ /* add meta dnode */
+ if (!(dnode_meta = dm_tree_add_new_dev_with_udev_flags(dtree,
+ name_meta,
+ dlid_meta,
+ -1, -1,
+ read_only_lv(lv, laopts, layer),
+ ((lv->vg->status & PRECOMMITTED) | laopts->revert) ? 1 : 0,
+ lvlayer,
+ udev_flags)))
+ return_0;
+
+ /* add load_segment to meta dnode: linear, size of meta area */
+ if (!add_linear_area_to_dtree(dnode_meta,
+ meta_len,
+ lv->vg->extent_size,
+ lv->vg->cmd->use_linear_target,
+ lv->vg->name, lv->name))
+ return_0;
+
+ /* add seg_area to prev load_seg: offset 0 maps to cachepool lv offset 0 */
+ if (!dm_tree_node_add_target_area(dnode_meta, NULL, dlid_pool, 0))
+ return_0;
+
+ /* add data dnode */
+ if (!(dnode_data = dm_tree_add_new_dev_with_udev_flags(dtree,
+ name_data,
+ dlid_data,
+ -1, -1,
+ read_only_lv(lv, laopts, layer),
+ ((lv->vg->status & PRECOMMITTED) | laopts->revert) ? 1 : 0,
+ lvlayer,
+ udev_flags)))
+ return_0;
+
+ /* add load_segment to data dnode: linear, size of data area */
+ if (!add_linear_area_to_dtree(dnode_data,
+ data_len,
+ lv->vg->extent_size,
+ lv->vg->cmd->use_linear_target,
+ lv->vg->name, lv->name))
+ return_0;
+
+ /* add seg_area to prev load_seg: offset 0 maps to cachepool lv after meta */
+ if (!dm_tree_node_add_target_area(dnode_data, NULL, dlid_pool, meta_len))
+ return_0;
+ }
+
/* FIXME Seek a simpler way to lay out the snapshot-merge tree. */
if (!layer && lv_is_merging_origin(lv)) {
@@ -3025,12 +3284,6 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
dm_tree_node_get_context(dnode))
return 1;
- if (!(lvlayer = dm_pool_alloc(dm->mem, sizeof(*lvlayer)))) {
- log_error("_add_new_lv_to_dtree: pool alloc failed for %s %s.",
- display_lvname(lv), layer);
- return 0;
- }
-
lvlayer->lv = lv;
lvlayer->visible_component = (laopts->component_lv == lv) ? 1 : 0;
@@ -3121,7 +3374,7 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
!_pool_register_callback(dm, dnode, lv))
return_0;
- if (lv_is_cache(lv) &&
+ if (lv_is_cache(lv) && !lv_is_cache_single(first_seg(lv)->pool_lv) &&
/* Register callback only for layer activation or non-layered cache LV */
(layer || !lv_layer(lv)) &&
/* Register callback when metadata LV is NOT already active */
diff --git a/lib/activate/dev_manager.h b/lib/activate/dev_manager.h
index bd96832..b669bd2 100644
--- a/lib/activate/dev_manager.h
+++ b/lib/activate/dev_manager.h
@@ -103,4 +103,14 @@ int dev_manager_execute(struct dev_manager *dm);
int dev_manager_device_uses_vg(struct device *dev,
struct volume_group *vg);
+int dev_manager_remove_dm_major_minor(uint32_t major, uint32_t minor);
+
+int get_cache_single_meta_data(struct cmd_context *cmd,
+ struct logical_volume *lv,
+ struct logical_volume *pool_lv,
+ struct dm_info *info_meta, struct dm_info *info_data);
+
+int remove_cache_single_meta_data(struct cmd_context *cmd,
+ struct dm_info *info_meta, struct dm_info *info_data);
+
#endif
diff --git a/lib/cache_segtype/cache.c b/lib/cache_segtype/cache.c
index 17f94d1..8a97b30 100644
--- a/lib/cache_segtype/cache.c
+++ b/lib/cache_segtype/cache.c
@@ -49,7 +49,10 @@ static void _cache_display(const struct lv_segment *seg)
const struct dm_config_node *n;
const struct lv_segment *setting_seg = NULL;
- if (seg_is_cache_pool(seg))
+ if (seg_is_cache(seg) && lv_is_cache_single(seg->pool_lv))
+ setting_seg = seg;
+
+ else if (seg_is_cache_pool(seg))
setting_seg = seg;
else if (seg_is_cache(seg))
@@ -474,6 +477,7 @@ static int _cache_text_import(struct lv_segment *seg,
{
struct logical_volume *pool_lv, *origin_lv;
const char *name;
+ const char *uuid;
if (!dm_config_has_node(sn, "cache_pool"))
return SEG_LOG_ERROR("cache_pool not specified in");
@@ -503,9 +507,44 @@ static int _cache_text_import(struct lv_segment *seg,
if (!attach_pool_lv(seg, pool_lv, NULL, NULL, NULL))
return_0;
- /* load order is unknown, could be cache origin or pool LV, so check for both */
- if (!dm_list_empty(&pool_lv->segments))
- _fix_missing_defaults(first_seg(pool_lv));
+ if (!_settings_text_import(seg, sn))
+ return_0;
+
+ if (dm_config_has_node(sn, "metadata_format")) {
+ if (!dm_config_get_uint32(sn, "metadata_format", &seg->cache_metadata_format))
+ return SEG_LOG_ERROR("Couldn't read cache metadata_format in");
+ if (seg->cache_metadata_format != CACHE_METADATA_FORMAT_2)
+ return SEG_LOG_ERROR("Unknown cache metadata format %u number in",
+ seg->cache_metadata_format);
+ }
+
+ if (dm_config_has_node(sn, "metadata_start")) {
+ if (!dm_config_get_uint64(sn, "metadata_start", &seg->metadata_start))
+ return SEG_LOG_ERROR("Couldn't read metadata_start in");
+ if (!dm_config_get_uint64(sn, "metadata_len", &seg->metadata_len))
+ return SEG_LOG_ERROR("Couldn't read metadata_len in");
+ if (!dm_config_get_uint64(sn, "data_start", &seg->data_start))
+ return SEG_LOG_ERROR("Couldn't read data_start in");
+ if (!dm_config_get_uint64(sn, "data_len", &seg->data_len))
+ return SEG_LOG_ERROR("Couldn't read data_len in");
+
+ if (!dm_config_get_str(sn, "metadata_id", &uuid))
+ return SEG_LOG_ERROR("Couldn't read metadata_id in");
+
+ if (!id_read_format(&seg->metadata_id, uuid))
+ return SEG_LOG_ERROR("Couldn't format metadata_id in");
+
+ if (!dm_config_get_str(sn, "data_id", &uuid))
+ return SEG_LOG_ERROR("Couldn't read data_id in");
+
+ if (!id_read_format(&seg->data_id, uuid))
+ return SEG_LOG_ERROR("Couldn't format data_id in");
+ } else {
+ /* Do not call this when LV is cache_single. */
+ /* load order is unknown, could be cache origin or pool LV, so check for both */
+ if (!dm_list_empty(&pool_lv->segments))
+ _fix_missing_defaults(first_seg(pool_lv));
+ }
return 1;
}
@@ -520,6 +559,8 @@ static int _cache_text_import_area_count(const struct dm_config_node *sn,
static int _cache_text_export(const struct lv_segment *seg, struct formatter *f)
{
+ char buffer[40];
+
if (!seg_lv(seg, 0))
return_0;
@@ -529,6 +570,26 @@ static int _cache_text_export(const struct lv_segment *seg, struct formatter *f)
if (seg->cleaner_policy)
outf(f, "cleaner = 1");
+ if (lv_is_cache_single(seg->pool_lv)) {
+ outf(f, "metadata_format = " FMTu32, seg->cache_metadata_format);
+
+ if (!_settings_text_export(seg, f))
+ return_0;
+
+ outf(f, "metadata_start = " FMTu64, seg->metadata_start);
+ outf(f, "metadata_len = " FMTu64, seg->metadata_len);
+ outf(f, "data_start = " FMTu64, seg->data_start);
+ outf(f, "data_len = " FMTu64, seg->data_len);
+
+ if (!id_write_format(&seg->metadata_id, buffer, sizeof(buffer)))
+ return_0;
+ outf(f, "metadata_id = \"%s\"", buffer);
+
+ if (!id_write_format(&seg->data_id, buffer, sizeof(buffer)))
+ return_0;
+ outf(f, "data_id = \"%s\"", buffer);
+ }
+
return 1;
}
@@ -544,6 +605,8 @@ static int _cache_add_target_line(struct dev_manager *dm,
{
struct lv_segment *cache_pool_seg;
struct lv_segment *setting_seg;
+ union lvid metadata_lvid;
+ union lvid data_lvid;
char *metadata_uuid, *data_uuid, *origin_uuid;
uint64_t feature_flags = 0;
unsigned attr;
@@ -557,7 +620,10 @@ static int _cache_add_target_line(struct dev_manager *dm,
cache_pool_seg = first_seg(seg->pool_lv);
- setting_seg = cache_pool_seg;
+ if (lv_is_cache_single(seg->pool_lv))
+ setting_seg = seg;
+ else
+ setting_seg = cache_pool_seg;
if (seg->cleaner_policy)
/* With cleaner policy always pass writethrough */
@@ -599,14 +665,45 @@ static int _cache_add_target_line(struct dev_manager *dm,
return 0;
}
- if (!(metadata_uuid = build_dm_uuid(mem, cache_pool_seg->metadata_lv, NULL)))
+ if (!(origin_uuid = build_dm_uuid(mem, seg_lv(seg, 0), NULL)))
return_0;
- if (!(data_uuid = build_dm_uuid(mem, seg_lv(cache_pool_seg, 0), NULL)))
- return_0;
+ if (!lv_is_cache_single(seg->pool_lv)) {
+ /* We don't use start/len when using separate data/meta devices. */
+ if (seg->metadata_len || seg->data_len) {
+ log_error(INTERNAL_ERROR "LV %s using unsupported ranges with cache pool.",
+ display_lvname(seg->lv));
+ return 0;
+ }
- if (!(origin_uuid = build_dm_uuid(mem, seg_lv(seg, 0), NULL)))
- return_0;
+ if (!(metadata_uuid = build_dm_uuid(mem, cache_pool_seg->metadata_lv, NULL)))
+ return_0;
+
+ if (!(data_uuid = build_dm_uuid(mem, seg_lv(cache_pool_seg, 0), NULL)))
+ return_0;
+ } else {
+ if (!seg->metadata_len || !seg->data_len || (seg->metadata_start == seg->data_start)) {
+ log_error(INTERNAL_ERROR "LV %s has invalid ranges metadata %llu %llu data %llu %llu.",
+ display_lvname(seg->lv),
+ (unsigned long long)seg->metadata_start,
+ (unsigned long long)seg->metadata_len,
+ (unsigned long long)seg->data_start,
+ (unsigned long long)seg->data_len);
+ return 0;
+ }
+
+ memset(&metadata_lvid, 0, sizeof(metadata_lvid));
+ memset(&data_lvid, 0, sizeof(data_lvid));
+ memcpy(&metadata_lvid.id[0], &seg->lv->vg->id, sizeof(struct id));
+ memcpy(&metadata_lvid.id[1], &seg->metadata_id, sizeof(struct id));
+ memcpy(&data_lvid.id[0], &seg->lv->vg->id, sizeof(struct id));
+ memcpy(&data_lvid.id[1], &seg->data_id, sizeof(struct id));
+
+ if (!(metadata_uuid = dm_build_dm_uuid(mem, UUID_PREFIX, (const char *)&metadata_lvid.s, NULL)))
+ return_0;
+ if (!(data_uuid = dm_build_dm_uuid(mem, UUID_PREFIX, (const char *)&data_lvid.s, NULL)))
+ return_0;
+ }
if (!dm_tree_node_add_cache_target(node, len,
feature_flags,
@@ -616,8 +713,12 @@ static int _cache_add_target_line(struct dev_manager *dm,
seg->cleaner_policy ? "cleaner" :
/* undefined policy name -> likely an old "mq" */
cache_pool_seg->policy_name ? : "mq",
- seg->cleaner_policy ? NULL : cache_pool_seg->policy_settings,
- cache_pool_seg->chunk_size))
+ seg->cleaner_policy ? NULL : setting_seg->policy_settings,
+ seg->metadata_start,
+ seg->metadata_len,
+ seg->data_start,
+ seg->data_len,
+ setting_seg->chunk_size))
return_0;
return 1;
diff --git a/lib/format_text/flags.c b/lib/format_text/flags.c
index 6f5ff9f..d7c4318 100644
--- a/lib/format_text/flags.c
+++ b/lib/format_text/flags.c
@@ -72,6 +72,7 @@ static const struct flag _lv_flags[] = {
{LV_ACTIVATION_SKIP, "ACTIVATION_SKIP", COMPATIBLE_FLAG},
{LV_ERROR_WHEN_FULL, "ERROR_WHEN_FULL", COMPATIBLE_FLAG},
{LV_METADATA_FORMAT, "METADATA_FORMAT", SEGTYPE_FLAG},
+ {LV_CACHE_SINGLE, "CACHE_SINGLE", STATUS_FLAG},
{LV_NOSCAN, NULL, 0},
{LV_TEMPORARY, NULL, 0},
{POOL_METADATA_SPARE, NULL, 0},
diff --git a/lib/locking/lvmlockd.c b/lib/locking/lvmlockd.c
index 969a7fe..530378c 100644
--- a/lib/locking/lvmlockd.c
+++ b/lib/locking/lvmlockd.c
@@ -2779,6 +2779,9 @@ int lockd_lv_uses_lock(struct logical_volume *lv)
if (lv_is_pool_metadata_spare(lv))
return 0;
+ if (lv_is_cache_single(lv))
+ return 0;
+
if (lv_is_cache_pool(lv))
return 0;
diff --git a/lib/metadata/cache_manip.c b/lib/metadata/cache_manip.c
index 8e8e704..6cf945b 100644
--- a/lib/metadata/cache_manip.c
+++ b/lib/metadata/cache_manip.c
@@ -23,6 +23,7 @@
#include "lib/config/defaults.h"
#include "lib/metadata/lv_alloc.h"
#include "lib/misc/lvm-signal.h"
+#include "lib/activate/dev_manager.h"
/* https://github.com/jthornber/thin-provisioning-tools/blob/master/caching/... */
#define DM_TRANSACTION_OVERHEAD 4096 /* KiB */
@@ -44,12 +45,26 @@ const char *cache_mode_num_to_str(cache_mode_t mode)
}
}
+const char *get_cache_mode_name(const struct lv_segment *pool_seg)
+{
+ const char *str;
+
+ if (!(str = cache_mode_num_to_str(pool_seg->cache_mode))) {
+ log_error(INTERNAL_ERROR "Cache pool %s has undefined cache mode, using writethrough instead.",
+ display_lvname(pool_seg->lv));
+ str = "writethrough";
+ }
+ return str;
+}
+
const char *display_cache_mode(const struct lv_segment *seg)
{
const struct lv_segment *setting_seg = NULL;
- const char *str;
- if (seg_is_cache_pool(seg))
+ if (seg_is_cache(seg) && lv_is_cache_single(seg->pool_lv))
+ setting_seg = seg;
+
+ else if (seg_is_cache_pool(seg))
setting_seg = seg;
else if (seg_is_cache(seg))
@@ -58,25 +73,7 @@ const char *display_cache_mode(const struct lv_segment *seg)
if (!setting_seg || (setting_seg->cache_mode == CACHE_MODE_UNSELECTED))
return "";
- if (!(str = cache_mode_num_to_str(setting_seg->cache_mode))) {
- log_error(INTERNAL_ERROR "Cache pool %s has undefined cache mode, using writethrough instead.",
- display_lvname(seg->lv));
- str = "writethrough";
- }
-
- return str;
-}
-
-const char *get_cache_mode_name(const struct lv_segment *pool_seg)
-{
- const char *str;
-
- if (!(str = cache_mode_num_to_str(pool_seg->cache_mode))) {
- log_error(INTERNAL_ERROR "Cache pool %s has undefined cache mode, using writethrough instead.",
- display_lvname(pool_seg->lv));
- str = "writethrough";
- }
- return str;
+ return cache_mode_num_to_str(setting_seg->cache_mode);
}
int set_cache_mode(cache_mode_t *mode, const char *cache_mode)
@@ -134,7 +131,10 @@ int cache_set_cache_mode(struct lv_segment *seg, cache_mode_t mode)
if (seg_is_cache_pool(seg) && (mode == CACHE_MODE_UNSELECTED))
return 1;
- if (seg_is_cache_pool(seg))
+ if (seg_is_cache(seg) && lv_is_cache_single(seg->pool_lv))
+ setting_seg = seg;
+
+ else if (seg_is_cache_pool(seg))
setting_seg = seg;
else if (seg_is_cache(seg))
@@ -334,7 +334,7 @@ int validate_lv_cache_create_pool(const struct logical_volume *pool_lv)
{
struct lv_segment *seg;
- if (!lv_is_cache_pool(pool_lv)) {
+ if (!lv_is_cache_pool(pool_lv) && !lv_is_cache_single(pool_lv)) {
log_error("Logical volume %s is not a cache pool.",
display_lvname(pool_lv));
return 0;
@@ -555,6 +555,187 @@ int lv_cache_wait_for_clean(struct logical_volume *cache_lv, int *is_clean)
return 1;
}
+
+static int _lv_detach_cache_single_while_active(struct cmd_context *cmd, struct logical_volume *cache_lv)
+{
+ struct lv_segment *cache_seg = first_seg(cache_lv);
+ struct logical_volume *corigin_lv;
+ struct logical_volume *cache_pool_lv;
+ struct lvinfo corigin_info;
+ struct dm_info info_meta;
+ struct dm_info info_data;
+ int is_clear;
+
+ cache_pool_lv = cache_seg->pool_lv;
+
+ corigin_lv = seg_lv(cache_seg, 0);
+
+ /*
+ * This info is needed to remove the corigin lv at the end.
+ */
+ if (!lv_info(cmd, corigin_lv, 1, &corigin_info, 0, 0))
+ log_error("Failed to get info about corigin %s", display_lvname(corigin_lv));
+
+ /*
+ * This info is needed to remove the cmeta/cdata devs at the end.
+ */
+ if (!get_cache_single_meta_data(cmd, cache_lv, cache_pool_lv, &info_meta, &info_data)) {
+ log_error("Failed to get info about cdata/cmeta for %s", display_lvname(cache_pool_lv));
+ return 0;
+ }
+
+ /*
+ * Flush the cache.
+ */
+ if (!lv_cache_wait_for_clean(cache_lv, &is_clear)) {
+ log_error("Failed to flush cache for detaching LV %s.", display_lvname(cache_lv));
+ return_0;
+ }
+
+ /*
+ * The main job of detaching the cache.
+ */
+
+ if (!detach_pool_lv(cache_seg)) {
+ log_error("Failed to detach cache from %s", display_lvname(cache_lv));
+ return_0;
+ }
+
+ cache_pool_lv->status &= ~LV_CACHE_SINGLE;
+
+ if (!remove_layer_from_lv(cache_lv, corigin_lv)) {
+ log_error("Failed to remove cache layer from %s", display_lvname(cache_lv));
+ return_0;
+ }
+
+ if (!lv_update_and_reload(cache_lv)) {
+ log_error("Failed to update and reload after detaching cache from %s", display_lvname(cache_lv));
+ return 0;
+ }
+
+ /*
+ * Detaching the cache is done, now finish cleaning up what's left over
+ * from when the cache was attached: deactivate the cache_pool_lv, and
+ * remove the unused dm dev for corigin_lv.
+ */
+
+ /* These cmeta/cdata dm devs need to be removed since they are using cache_pool_lv. */
+ if (!remove_cache_single_meta_data(cmd, &info_meta, &info_data))
+ log_error("Failed to remove cdata/cmeta devs for %s", display_lvname(cache_pool_lv));
+
+ if (!deactivate_lv(cmd, cache_pool_lv))
+ log_error("Failed to deactivate the detached cache %s", display_lvname(cache_pool_lv));
+
+ if (!corigin_info.major || !corigin_info.minor) {
+ log_error("Invalid device number %u:%u for corigin %s",
+ corigin_info.major, corigin_info.minor, display_lvname(corigin_lv));
+ return 1;
+ }
+
+ dm_udev_set_sync_support(0);
+
+ if (!dev_manager_remove_dm_major_minor(corigin_info.major, corigin_info.minor))
+ log_error("Failed to remove the unused corigin dev %s", display_lvname(corigin_lv));
+
+ dm_udev_set_sync_support(1);
+
+ if (!lv_remove(corigin_lv)) {
+ log_error("Failed to remove unused cache layer %s for %s",
+ display_lvname(corigin_lv),
+ display_lvname(cache_lv));
+ return_0;
+ }
+
+ return 1;
+}
+
+static int _lv_detach_cache_single_while_inactive(struct cmd_context *cmd, struct logical_volume *cache_lv)
+{
+ struct lv_segment *cache_seg = first_seg(cache_lv);
+ struct logical_volume *corigin_lv;
+ struct logical_volume *cache_pool_lv;
+ int cache_mode;
+ int is_clear;
+
+ cache_pool_lv = cache_seg->pool_lv;
+
+ corigin_lv = seg_lv(cache_seg, 0);
+
+ cache_mode = cache_seg->cache_mode;
+
+ /*
+ * With these modes there is no flush needed so we can immediately
+ * detach without temporarily activating the LV to flush it.
+ */
+ if ((cache_mode == CACHE_MODE_WRITETHROUGH) || (cache_mode == CACHE_MODE_PASSTHROUGH))
+ goto detach;
+
+ /*
+ * With mode WRITEBACK we need to activate the cache LV to flush/clean
+ * it before detaching the cache.
+ *
+ * LV_TEMPORARY should prevent the active LV from being exposed and
+ * used outside of lvm.
+ */
+
+ log_debug("Activating %s internally for cache flush.", display_lvname(cache_lv));
+
+ cache_lv->status |= LV_TEMPORARY;
+
+ if (!activate_lv(cmd, cache_lv)) {
+ log_error("Failed to activate LV %s to flush cache.", display_lvname(cache_lv));
+ return 0;
+ }
+
+ if (!lv_cache_wait_for_clean(cache_lv, &is_clear)) {
+ log_error("Failed to flush cache for detaching LV %s.", display_lvname(cache_lv));
+ return_0;
+ }
+
+ if (!deactivate_lv(cmd, cache_lv)) {
+ log_error("Failed to deactivate LV %s for detaching cache.", display_lvname(cache_lv));
+ return 0;
+ }
+ cache_lv->status &= ~LV_TEMPORARY;
+
+ detach:
+ if (!detach_pool_lv(cache_seg)) {
+ log_error("Failed to detach cache from %s", display_lvname(cache_lv));
+ return_0;
+ }
+
+ cache_pool_lv->status &= ~LV_CACHE_SINGLE;
+
+ if (!remove_layer_from_lv(cache_lv, corigin_lv)) {
+ log_error("Failed to remove cache layer from %s", display_lvname(cache_lv));
+ return_0;
+ }
+
+ if (!lv_remove(corigin_lv)) {
+ log_error("Failed to remove unused cache layer %s for %s",
+ display_lvname(corigin_lv),
+ display_lvname(cache_lv));
+ return_0;
+ }
+
+ return 1;
+}
+
+int lv_detach_cache_single(struct logical_volume *cache_lv)
+{
+ struct cmd_context *cmd = cache_lv->vg->cmd;
+
+ if (lv_is_pending_delete(cache_lv)) {
+ log_error("Already detaching cache pool from %s.", display_lvname(cache_lv));
+ return 0;
+ }
+
+ if (lv_is_active(cache_lv))
+ return _lv_detach_cache_single_while_active(cmd, cache_lv);
+ else
+ return _lv_detach_cache_single_while_inactive(cmd, cache_lv);
+}
+
/*
* lv_cache_remove
* @cache_lv
@@ -579,6 +760,11 @@ int lv_cache_remove(struct logical_volume *cache_lv)
return 0;
}
+ if (lv_is_cache_single(cache_seg->pool_lv)) {
+ log_error(INTERNAL_ERROR "Incorrect remove for cache single");
+ return 0;
+ }
+
if (lv_is_pending_delete(cache_lv)) {
log_debug(INTERNAL_ERROR "LV %s is already dropped cache volume.",
display_lvname(cache_lv));
@@ -763,7 +949,10 @@ int cache_set_policy(struct lv_segment *lvseg, const char *name,
return 1; /* Policy and settings can be selected later when caching LV */
}
- if (seg_is_cache_pool(lvseg))
+ if (seg_is_cache(lvseg) && lv_is_cache_single(lvseg->pool_lv))
+ seg = lvseg;
+
+ else if (seg_is_cache_pool(lvseg))
seg = lvseg;
else if (seg_is_cache(lvseg))
@@ -933,10 +1122,241 @@ int cache_set_metadata_format(struct lv_segment *seg, cache_metadata_format_t fo
return 1;
}
-/*
- * Universal 'wrapper' function do-it-all
- * to update all commonly specified cache parameters
- */
+#define ONE_MB_S 2048 /* 1MB in sectors */
+#define ONE_GB_S 2097152 /* 1GB in sectors */
+
+int cache_single_set_params(struct cmd_context *cmd,
+ struct logical_volume *cache_lv,
+ struct logical_volume *pool_lv,
+ uint64_t poolmetadatasize,
+ uint32_t chunk_size,
+ cache_metadata_format_t format,
+ cache_mode_t mode,
+ const char *policy,
+ const struct dm_config_tree *settings)
+{
+ struct dm_pool *mem = cache_lv->vg->vgmem;
+ struct profile *profile = cache_lv->profile;
+ struct lv_segment *cache_seg = first_seg(cache_lv);
+ struct logical_volume *corig_lv = seg_lv(cache_seg, 0);
+ const char *policy_name = NULL;
+ struct dm_config_node *policy_settings = NULL;
+ const struct dm_config_node *cns;
+ struct dm_config_node *cn;
+ uint64_t meta_size = 0;
+ uint64_t data_size = 0;
+ uint64_t max_chunks;
+ uint32_t min_meta_size;
+ uint32_t max_meta_size;
+ uint32_t extent_size;
+
+ /* all _size variables in units of sectors (512 bytes) */
+
+
+ /*
+ * cache format: only create new cache LVs with 2.
+ */
+
+ if (format == CACHE_METADATA_FORMAT_UNSELECTED)
+ format = CACHE_METADATA_FORMAT_2;
+ if (format == CACHE_METADATA_FORMAT_1) {
+ log_error("Use cache metadata format 2.");
+ return 0;
+ }
+
+
+ /*
+ * cache mode: get_cache_params() gets mode from --cachemode or sets
+ * UNSEL. When unspecified, it comes from config.
+ */
+
+ if (mode == CACHE_MODE_UNSELECTED)
+ mode = _get_cache_mode_from_config(cmd, profile, cache_lv);
+
+ cache_seg->cache_mode = mode;
+
+
+ /*
+ * chunk size: get_cache_params() get chunk_size from --chunksize or
+ * sets 0. When unspecified it comes from config or default.
+ *
+ * cache_pool_chunk_size in lvm.conf, DEFAULT_CACHE_POOL_CHUNK_SIZE,
+ * and DEFAULT_CACHE_POOL_MAX_METADATA_SIZE are in KiB, so *2 turn
+ * them into sectors.
+ */
+
+ if (!chunk_size)
+ chunk_size = find_config_tree_int(cmd, allocation_cache_pool_chunk_size_CFG, cache_lv->profile) * 2;
+
+ if (!chunk_size)
+ chunk_size = get_default_allocation_cache_pool_chunk_size_CFG(cmd, profile);
+
+ if (!validate_cache_chunk_size(cmd, chunk_size))
+ return_0;
+
+
+ /*
+ * metadata size: can be specified with --poolmetadatasize,
+ * otherwise it's set according to the size of the cache.
+ * data size: the LV size minus the metadata size.
+ */
+
+ extent_size = pool_lv->vg->extent_size;
+ min_meta_size = extent_size;
+ max_meta_size = 2 * DEFAULT_CACHE_POOL_MAX_METADATA_SIZE; /* 2x for KiB to sectors */
+
+ if (pool_lv->size < (extent_size * 2)) {
+ log_error("The minimum cache size is two extents (%s bytes).",
+ display_size(cmd, extent_size * 2));
+ return 0;
+ }
+
+ if (poolmetadatasize) {
+ meta_size = poolmetadatasize; /* in sectors, from --poolmetadatasize, see _size_arg() */
+
+ if (meta_size > max_meta_size) {
+ meta_size = max_meta_size;
+ log_print_unless_silent("Rounding down metadata size to max size %s",
+ display_size(cmd, meta_size));
+ }
+ if (meta_size < min_meta_size) {
+ meta_size = min_meta_size;
+ log_print_unless_silent("Rounding up metadata size to min size %s",
+ display_size(cmd, meta_size));
+ }
+
+ if (meta_size % extent_size) {
+ meta_size += extent_size - meta_size % extent_size;
+ log_print_unless_silent("Rounding up metadata size to full physical extent %s",
+ display_size(cmd, meta_size));
+ }
+ }
+
+ if (!meta_size) {
+ if (pool_lv->size < (128 * ONE_MB_S))
+ meta_size = 16 * ONE_MB_S;
+
+ else if (pool_lv->size < ONE_GB_S)
+ meta_size = 32 * ONE_MB_S;
+
+ else if (pool_lv->size < (128 * ONE_GB_S))
+ meta_size = 64 * ONE_MB_S;
+
+ if (meta_size > (pool_lv->size / 2))
+ meta_size = pool_lv->size / 2;
+
+ if (meta_size < min_meta_size)
+ meta_size = min_meta_size;
+
+ if (meta_size % extent_size)
+ meta_size += extent_size - meta_size % extent_size;
+ }
+
+ data_size = pool_lv->size - meta_size;
+
+ max_chunks = get_default_allocation_cache_pool_max_chunks_CFG(cmd, profile);
+
+ if (data_size / chunk_size > max_chunks) {
+ log_error("Cache data blocks %llu and chunk size %u exceed max chunks %llu.",
+ (unsigned long long)data_size, chunk_size, (unsigned long long)max_chunks);
+ log_error("Use smaller cache, larger --chunksize or increase max chunks setting.");
+ return 0;
+ }
+
+
+ /*
+ * cache policy: get_cache_params() gets policy from --cachepolicy,
+ * or sets NULL.
+ */
+
+ if (!policy)
+ policy = find_config_tree_str(cmd, allocation_cache_policy_CFG, profile);
+
+ if (!policy)
+ policy = _get_default_cache_policy(cmd);
+
+ if (!policy) {
+ log_error(INTERNAL_ERROR "Missing cache policy name.");
+ return 0;
+ }
+
+ if (!(policy_name = dm_pool_strdup(mem, policy)))
+ return_0;
+
+
+ /*
+ * cache settings: get_cache_params() gets policy from --cachesettings,
+ * or sets NULL.
+ * FIXME: code for this is a mess, mostly copied from cache_set_policy
+ * which is even worse.
+ */
+
+ if (settings) {
+ if ((cn = dm_config_find_node(settings->root, "policy_settings"))) {
+ if (!(policy_settings = dm_config_clone_node_with_mem(mem, cn, 0)))
+ return_0;
+ }
+ } else {
+ if ((cns = find_config_tree_node(cmd, allocation_cache_settings_CFG_SECTION, profile))) {
+ /* Try to find our section for given policy */
+ for (cn = cns->child; cn; cn = cn->sib) {
+ if (!cn->child)
+ continue; /* Ignore section without settings */
+
+ if (cn->v || strcmp(cn->key, policy_name) != 0)
+ continue; /* Ignore mismatching sections */
+
+ /* Clone nodes with policy name */
+ if (!(policy_settings = dm_config_clone_node_with_mem(mem, cn, 0)))
+ return_0;
+
+ /* Replace policy name key with 'policy_settings' */
+ policy_settings->key = "policy_settings";
+ break; /* Only first match counts */
+ }
+ }
+ }
+ restart: /* remove any 'default" nodes */
+ cn = policy_settings ? policy_settings->child : NULL;
+ while (cn) {
+ if (cn->v->type == DM_CFG_STRING && !strcmp(cn->v->v.str, "default")) {
+ dm_config_remove_node(policy_settings, cn);
+ goto restart;
+ }
+ cn = cn->sib;
+ }
+
+
+ log_debug("Setting LV %s cache on %s meta start 0 len %llu data start %llu len %llu sectors",
+ display_lvname(cache_lv), display_lvname(pool_lv),
+ (unsigned long long)meta_size,
+ (unsigned long long)meta_size,
+ (unsigned long long)data_size);
+ log_debug("Setting LV %s cache format %u policy %s chunk_size %u sectors",
+ display_lvname(cache_lv), format, policy_name, chunk_size);
+
+ if (lv_is_raid(corig_lv) && (mode == CACHE_MODE_WRITEBACK))
+ log_warn("WARNING: Data redundancy could be lost with writeback caching of raid logical volume!");
+
+ if (lv_is_thin_pool_data(cache_lv)) {
+ log_warn("WARNING: thin pool data will not be automatically extended when cached.");
+ log_warn("WARNING: manual splitcache is required before extending thin pool data.");
+ }
+
+ cache_seg->chunk_size = chunk_size;
+ cache_seg->metadata_start = 0;
+ cache_seg->metadata_len = meta_size;
+ cache_seg->data_start = meta_size;
+ cache_seg->data_len = data_size;
+ cache_seg->cache_metadata_format = format;
+ cache_seg->policy_name = policy_name;
+ cache_seg->policy_settings = policy_settings;
+ id_create(&cache_seg->metadata_id);
+ id_create(&cache_seg->data_id);
+
+ return 1;
+}
+
int cache_set_params(struct lv_segment *seg,
uint32_t chunk_size,
cache_metadata_format_t format,
@@ -1002,7 +1422,7 @@ int wipe_cache_pool(struct logical_volume *cache_pool_lv)
int r;
/* Only unused cache-pool could be activated and wiped */
- if (!lv_is_cache_pool(cache_pool_lv) ||
+ if ((!lv_is_cache_pool(cache_pool_lv) && !lv_is_cache_single(cache_pool_lv)) ||
!dm_list_empty(&cache_pool_lv->segs_using_this_lv)) {
log_error(INTERNAL_ERROR "Failed to wipe cache pool for volume %s.",
display_lvname(cache_pool_lv));
diff --git a/lib/metadata/lv.c b/lib/metadata/lv.c
index 9c8b028..cb064d8 100644
--- a/lib/metadata/lv.c
+++ b/lib/metadata/lv.c
@@ -333,6 +333,8 @@ uint64_t lvseg_chunksize(const struct lv_segment *seg)
if (lv_is_cow(seg->lv))
size = (uint64_t) find_snapshot(seg->lv)->chunk_size;
+ else if (seg_is_cache(seg) && lv_is_cache_single(seg->pool_lv))
+ size = (uint64_t) seg->chunk_size;
else if (seg_is_pool(seg))
size = (uint64_t) seg->chunk_size;
else if (seg_is_cache(seg))
@@ -932,10 +934,18 @@ uint64_t lv_origin_size(const struct logical_volume *lv)
uint64_t lv_metadata_size(const struct logical_volume *lv)
{
- struct lv_segment *seg = (lv_is_thin_pool(lv) || lv_is_cache_pool(lv)) ?
- first_seg(lv) : NULL;
+ struct lv_segment *seg;
+
+ if (!(seg = first_seg(lv)))
+ return 0;
+
+ if (seg_is_cache(seg) && lv_is_cache_single(seg->pool_lv))
+ return seg->metadata_len;
- return seg ? seg->metadata_lv->size : 0;
+ if (lv_is_thin_pool(lv) || lv_is_cache_pool(lv))
+ return seg->metadata_lv->size;
+
+ return 0;
}
char *lv_path_dup(struct dm_pool *mem, const struct logical_volume *lv)
@@ -1297,7 +1307,7 @@ char *lv_attr_dup_with_info_and_seg_status(struct dm_pool *mem, const struct lv_
if (lv_is_thin_pool(lv) || lv_is_thin_volume(lv))
repstr[6] = 't';
- else if (lv_is_cache_pool(lv) || lv_is_cache(lv) || lv_is_cache_origin(lv))
+ else if (lv_is_cache_pool(lv) || lv_is_cache_single(lv) || lv_is_cache(lv) || lv_is_cache_origin(lv))
repstr[6] = 'C';
else if (lv_is_raid_type(lv))
repstr[6] = 'r';
diff --git a/lib/metadata/lv_manip.c b/lib/metadata/lv_manip.c
index 43e09a4..8e64dac 100644
--- a/lib/metadata/lv_manip.c
+++ b/lib/metadata/lv_manip.c
@@ -422,7 +422,7 @@ static int _lv_layout_and_role_cache(struct dm_pool *mem,
if (lv_is_cache(lv) &&
!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_CACHE]))
goto_bad;
- else if (lv_is_cache_pool(lv)) {
+ else if (lv_is_cache_pool(lv) || lv_is_cache_single(lv)) {
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_CACHE]) ||
!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_POOL]))
goto_bad;
@@ -4449,6 +4449,7 @@ static int _rename_skip_pools_externals_cb(struct logical_volume *lv, void *data
{
if (lv_is_pool(lv) ||
lv_is_vdo_pool(lv) ||
+ lv_is_cache_single(lv) ||
lv_is_external_origin(lv))
return -1; /* and skip subLVs */
@@ -6147,6 +6148,13 @@ int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
if (!lockd_lv(cmd, lock_lv, "ex", LDLV_PERSISTENT))
return_0;
+ if (lv_is_cache(lv) && lv_is_cache_single(first_seg(lv)->pool_lv)) {
+ if (!lv_detach_cache_single(lv)) {
+ log_error("Failed to detach cache from %s", display_lvname(lv));
+ return 0;
+ }
+ }
+
/* FIXME Ensure not referred to by another existing LVs */
ask_discard = find_config_tree_bool(cmd, devices_issue_discards_CFG, NULL);
diff --git a/lib/metadata/merge.c b/lib/metadata/merge.c
index 035a56c..d95da1f 100644
--- a/lib/metadata/merge.c
+++ b/lib/metadata/merge.c
@@ -321,6 +321,8 @@ static void _check_lv_segment(struct logical_volume *lv, struct lv_segment *seg,
unsigned seg_count, int *error_count)
{
struct lv_segment *seg2;
+ struct lv_segment *cache_setting_seg = NULL;
+ int no_metadata_format = 0;
if (lv_is_mirror_image(lv) &&
(!(seg2 = find_mirror_seg(seg)) || !seg_is_mirrored(seg2)))
@@ -332,23 +334,31 @@ static void _check_lv_segment(struct logical_volume *lv, struct lv_segment *seg,
if (!seg->pool_lv) {
seg_error("is missing cache pool LV");
- } else if (!lv_is_cache_pool(seg->pool_lv))
+ } else if (!lv_is_cache_pool(seg->pool_lv) && !lv_is_cache_single(seg->pool_lv))
seg_error("is not referencing cache pool LV");
} else { /* !cache */
if (seg->cleaner_policy)
seg_error("sets cleaner_policy");
}
- if (seg_is_cache_pool(seg)) {
- if (!dm_list_empty(&seg->lv->segs_using_this_lv)) {
- switch (seg->cache_metadata_format) {
+ if (lv_is_cache(lv) && seg->pool_lv && lv_is_cache_single(seg->pool_lv)) {
+ cache_setting_seg = seg;
+ no_metadata_format = 1;
+ }
+
+ else if (lv_is_cache_pool(lv))
+ cache_setting_seg = seg;
+
+ if (cache_setting_seg) {
+ if (!dm_list_empty(&cache_setting_seg->lv->segs_using_this_lv)) {
+ switch (cache_setting_seg->cache_metadata_format) {
case CACHE_METADATA_FORMAT_2:
case CACHE_METADATA_FORMAT_1:
break;
default:
seg_error("has invalid cache metadata format");
}
- switch (seg->cache_mode) {
+ switch (cache_setting_seg->cache_mode) {
case CACHE_MODE_WRITETHROUGH:
case CACHE_MODE_WRITEBACK:
case CACHE_MODE_PASSTHROUGH:
@@ -356,17 +366,24 @@ static void _check_lv_segment(struct logical_volume *lv, struct lv_segment *seg,
default:
seg_error("has invalid cache's feature flag");
}
- if (!seg->policy_name)
+ if (!cache_setting_seg->policy_name)
seg_error("is missing cache policy name");
}
- if (!validate_cache_chunk_size(lv->vg->cmd, seg->chunk_size))
+
+ if (!validate_cache_chunk_size(lv->vg->cmd, cache_setting_seg->chunk_size))
seg_error("has invalid chunk size.");
- if (seg->lv->status & LV_METADATA_FORMAT) {
- if (seg->cache_metadata_format != CACHE_METADATA_FORMAT_2)
+
+ if (cache_setting_seg->lv->status & LV_METADATA_FORMAT) {
+ if (cache_setting_seg->cache_metadata_format != CACHE_METADATA_FORMAT_2)
seg_error("sets METADATA_FORMAT flag");
- } else if (seg->cache_metadata_format == CACHE_METADATA_FORMAT_2)
+ }
+
+ if (!no_metadata_format &&
+ (cache_setting_seg->cache_metadata_format == CACHE_METADATA_FORMAT_2) &&
+ !(cache_setting_seg->lv->status & LV_METADATA_FORMAT))
seg_error("is missing METADATA_FORMAT flag");
- } else { /* !cache_pool */
+
+ } else {
if (seg->cache_metadata_format)
seg_error("sets cache metadata format");
if (seg->cache_mode)
@@ -519,7 +536,8 @@ static void _check_lv_segment(struct logical_volume *lv, struct lv_segment *seg,
if (!seg_is_pool(seg) &&
/* FIXME: format_pool/import_export.c _add_linear_seg() sets chunk_size */
!seg_is_linear(seg) &&
- !seg_is_snapshot(seg)) {
+ !seg_is_snapshot(seg) &&
+ !seg_is_cache(seg)) {
if (seg->chunk_size)
seg_error("sets chunk_size");
}
@@ -757,6 +775,7 @@ int check_lv_segments(struct logical_volume *lv, int complete_vg)
if ((seg_count != 1) &&
(lv_is_cache(lv) ||
lv_is_cache_pool(lv) ||
+ lv_is_cache_single(lv) ||
lv_is_raid(lv) ||
lv_is_snapshot(lv) ||
lv_is_thin_pool(lv) ||
diff --git a/lib/metadata/metadata-exported.h b/lib/metadata/metadata-exported.h
index 76c164b..30ab356 100644
--- a/lib/metadata/metadata-exported.h
+++ b/lib/metadata/metadata-exported.h
@@ -151,6 +151,8 @@
#define LV_VDO_POOL UINT64_C(0x0000000040000000) /* LV - Internal user only */
#define LV_VDO_POOL_DATA UINT64_C(0x8000000000000000) /* LV - Internal user only */
+#define LV_CACHE_SINGLE UINT64_C(0x0010000000000000) /* LV - also a PV flag */
+
/* Format features flags */
#define FMT_SEGMENTS 0x00000001U /* Arbitrary segment params? */
@@ -245,10 +247,11 @@
#define lv_is_cache(lv) (((lv)->status & CACHE) ? 1 : 0)
#define lv_is_cache_pool(lv) (((lv)->status & CACHE_POOL) ? 1 : 0)
+#define lv_is_cache_single(lv) (((lv)->status & LV_CACHE_SINGLE) ? 1 : 0)
#define lv_is_used_cache_pool(lv) (lv_is_cache_pool(lv) && !dm_list_empty(&(lv)->segs_using_this_lv))
#define lv_is_cache_pool_data(lv) (((lv)->status & CACHE_POOL_DATA) ? 1 : 0)
#define lv_is_cache_pool_metadata(lv) (((lv)->status & CACHE_POOL_METADATA) ? 1 : 0)
-#define lv_is_cache_type(lv) (((lv)->status & (CACHE | CACHE_POOL | CACHE_POOL_DATA | CACHE_POOL_METADATA)) ? 1 : 0)
+#define lv_is_cache_type(lv) (((lv)->status & (CACHE | CACHE_POOL | LV_CACHE_SINGLE | CACHE_POOL_DATA | CACHE_POOL_METADATA)) ? 1 : 0)
#define lv_is_pool(lv) (((lv)->status & (CACHE_POOL | THIN_POOL)) ? 1 : 0)
#define lv_is_pool_data(lv) (((lv)->status & (CACHE_POOL_DATA | THIN_POOL_DATA)) ? 1 : 0)
@@ -493,6 +496,13 @@ struct lv_segment {
struct logical_volume *pool_lv; /* For thin, cache */
uint32_t device_id; /* For thin, 24bit */
+ uint64_t metadata_start; /* For cache */
+ uint64_t metadata_len; /* For cache */
+ uint64_t data_start; /* For cache */
+ uint64_t data_len; /* For cache */
+ struct id metadata_id; /* For cache */
+ struct id data_id; /* For cache */
+
cache_metadata_format_t cache_metadata_format;/* For cache_pool */
cache_mode_t cache_mode; /* For cache_pool */
const char *policy_name; /* For cache_pool */
@@ -1218,7 +1228,7 @@ struct lv_status_cache {
const char *cache_mode_num_to_str(cache_mode_t mode);
const char *display_cache_mode(const struct lv_segment *seg);
-const char *get_cache_mode_name(const struct lv_segment *pool_seg);
+const char *get_cache_mode_name(const struct lv_segment *seg);
int set_cache_mode(cache_mode_t *mode, const char *cache_mode);
int cache_set_cache_mode(struct lv_segment *seg, cache_mode_t mode);
int cache_set_metadata_format(struct lv_segment *seg, cache_metadata_format_t format);
@@ -1230,6 +1240,15 @@ int cache_set_params(struct lv_segment *seg,
cache_mode_t mode,
const char *policy_name,
const struct dm_config_tree *policy_settings);
+int cache_single_set_params(struct cmd_context *cmd,
+ struct logical_volume *cache_lv,
+ struct logical_volume *pool_lv,
+ uint64_t poolmetadatasize,
+ uint32_t chunk_size,
+ cache_metadata_format_t format,
+ cache_mode_t mode,
+ const char *policy,
+ const struct dm_config_tree *settings);
void cache_check_for_warns(const struct lv_segment *seg);
int update_cache_pool_params(struct cmd_context *cmd,
struct profile *profile,
@@ -1246,6 +1265,7 @@ struct logical_volume *lv_cache_create(struct logical_volume *pool_lv,
struct logical_volume *origin_lv);
int lv_cache_wait_for_clean(struct logical_volume *cache_lv, int *is_clean);
int lv_cache_remove(struct logical_volume *cache_lv);
+int lv_detach_cache_single(struct logical_volume *cache_lv);
int wipe_cache_pool(struct logical_volume *cache_pool_lv);
/* -- metadata/cache_manip.c */
diff --git a/lib/report/report.c b/lib/report/report.c
index 52baa6c..ecec0a3 100644
--- a/lib/report/report.c
+++ b/lib/report/report.c
@@ -1430,7 +1430,10 @@ static int _cache_settings_disp(struct dm_report *rh, struct dm_pool *mem,
struct _str_list_append_baton baton;
struct dm_list dummy_list; /* dummy list to display "nothing" */
- if (seg_is_cache_pool(seg))
+ if (seg_is_cache(seg) && lv_is_cache_single(seg->pool_lv))
+ setting_seg = seg;
+
+ else if (seg_is_cache_pool(seg))
setting_seg = seg;
else if (seg_is_cache(seg))
@@ -1565,7 +1568,10 @@ static int _cache_policy_disp(struct dm_report *rh, struct dm_pool *mem,
const struct lv_segment *seg = (const struct lv_segment *) data;
const struct lv_segment *setting_seg = NULL;
- if (seg_is_cache_pool(seg))
+ if (seg_is_cache(seg) && lv_is_cache_single(seg->pool_lv))
+ setting_seg = seg;
+
+ else if (seg_is_cache_pool(seg))
setting_seg = seg;
else if (seg_is_cache(seg))
@@ -2747,7 +2753,10 @@ static int _cachemetadataformat_disp(struct dm_report *rh, struct dm_pool *mem,
const struct lv_segment *setting_seg = NULL;
const uint64_t *fmt;
- if (seg_is_cache_pool(seg))
+ if (seg_is_cache(seg) && lv_is_cache_single(seg->pool_lv))
+ setting_seg = seg;
+
+ else if (seg_is_cache_pool(seg))
setting_seg = seg;
else if (seg_is_cache(seg))
@@ -3222,6 +3231,11 @@ static int _lvmetadatasize_disp(struct dm_report *rh, struct dm_pool *mem,
const struct logical_volume *lv = (const struct logical_volume *) data;
uint64_t size;
+ if (lv_is_cache(lv) && lv_is_cache_single(first_seg(lv)->pool_lv)) {
+ size = lv_metadata_size(lv);
+ return _size64_disp(rh, mem, field, &size, private);
+ }
+
if (lv_is_thin_pool(lv) || lv_is_cache_pool(lv)) {
size = lv_metadata_size(lv);
return _size64_disp(rh, mem, field, &size, private);
diff --git a/test/shell/cache-single-options.sh b/test/shell/cache-single-options.sh
new file mode 100644
index 0000000..f8dde12
--- /dev/null
+++ b/test/shell/cache-single-options.sh
@@ -0,0 +1,269 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2017 Red Hat, Inc. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v.2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# Test single lv cache options
+
+SKIP_WITH_LVMPOLLD=1
+
+. lib/inittest
+
+mount_dir="mnt"
+mkdir -p $mount_dir
+
+# generate random data
+dmesg > pattern1
+ps aux >> pattern1
+
+aux prepare_devs 5 64
+
+vgcreate $SHARED $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5"
+
+lvcreate -n $lv1 -l 8 -an $vg "$dev1"
+lvcreate -n $lv2 -l 4 -an $vg "$dev2"
+lvcreate -n $lv3 -l 4 -an $vg "$dev3"
+lvcreate -n $lv4 -l 4 -an $vg "$dev4"
+lvcreate -n $lv5 -l 8 -an $vg "$dev5"
+
+mkfs_mount_umount()
+{
+ lvt=$1
+
+ lvchange -ay $vg/$lvt
+
+ mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lvt"
+ mount "$DM_DEV_DIR/$vg/$lvt" $mount_dir
+ cp pattern1 $mount_dir/pattern1
+ dd if=/dev/zero of=$mount_dir/zeros2M bs=1M count=2 oflag=sync
+ umount $mount_dir
+
+ lvchange -an $vg/$lvt
+}
+
+mount_umount()
+{
+ lvt=$1
+
+ lvchange -ay $vg/$lvt
+
+ mount "$DM_DEV_DIR/$vg/$lvt" $mount_dir
+ diff pattern1 $mount_dir/pattern1
+ dd if=$mount_dir/zeros2M of=/dev/null bs=1M count=2
+ umount $mount_dir
+
+ lvchange -an $vg/$lvt
+}
+
+#
+# Test --cachemetadataformat
+#
+
+# 1 shouldn't be used any longer
+not lvconvert --cachemetadataformat 1 -y --type cache --cachepool $lv2 $vg/$lv1
+
+# 3 doesn't exist
+not lvconvert --cachemetadataformat 3 -y --type cache --cachepool $lv2 $vg/$lv1
+
+# 2 is used by default
+lvconvert -y --type cache --cachepool $lv2 $vg/$lv1
+
+check lv_field $vg/$lv1 cachemetadataformat "2"
+
+lvconvert --splitcache $vg/$lv1
+check lv_field $vg/$lv1 segtype linear
+check lv_field $vg/$lv2 segtype linear
+
+# 2 can be set explicitly
+lvconvert --cachemetadataformat 2 -y --type cache --cachepool $lv2 $vg/$lv1
+
+check lv_field $vg/$lv1 cachemetadataformat "2"
+
+lvconvert --splitcache $vg/$lv1
+
+# "auto" means 2
+lvconvert --cachemetadataformat auto -y --type cache --cachepool $lv2 $vg/$lv1
+
+check lv_field $vg/$lv1 cachemetadataformat "2"
+
+mkfs_mount_umount $lv1
+
+lvconvert --splitcache $vg/$lv1
+check lv_field $vg/$lv1 segtype linear
+check lv_field $vg/$lv2 segtype linear
+mount_umount $lv1
+
+
+#
+# Test --poolmetadatasize
+#
+
+lvconvert -y --type cache --cachepool $lv2 --poolmetadatasize 4m $vg/$lv1
+
+check lv_field $vg/$lv1 lv_metadata_size "4.00m"
+
+mkfs_mount_umount $lv1
+
+lvconvert --splitcache $vg/$lv1
+check lv_field $vg/$lv1 segtype linear
+check lv_field $vg/$lv2 segtype linear
+mount_umount $lv1
+
+
+#
+# Test --chunksize
+#
+
+lvconvert -y --type cache --cachepool $lv2 --chunksize 32k $vg/$lv1
+
+check lv_field $vg/$lv1 chunksize "32.00k"
+
+mkfs_mount_umount $lv1
+
+lvconvert --splitcache $vg/$lv1
+check lv_field $vg/$lv1 segtype linear
+check lv_field $vg/$lv2 segtype linear
+mount_umount $lv1
+
+
+#
+# Test --cachemode
+#
+
+lvconvert -y --type cache --cachepool $lv2 --cachemode writethrough $vg/$lv1
+
+check lv_field $vg/$lv1 cachemode "writethrough"
+
+mkfs_mount_umount $lv1
+
+lvconvert --splitcache $vg/$lv1
+check lv_field $vg/$lv1 segtype linear
+check lv_field $vg/$lv2 segtype linear
+mount_umount $lv1
+
+# FIXME: kernel errors for other cache modes
+
+#lvconvert -y --type cache --cachepool $lv2 --cachemode passthrough $vg/$lv1
+
+#check lv_field $vg/$lv1 cachemode "passthrough"
+
+#mkfs_mount_umount $lv1
+
+#lvconvert --splitcache $vg/$lv1
+#check lv_field $vg/$lv1 segtype linear
+#check lv_field $vg/$lv2 segtype linear
+#mount_umount $lv1
+
+
+#lvconvert -y --type cache --cachepool $lv2 --cachemode writeback $vg/$lv1
+
+#check lv_field $vg/$lv1 cachemode "writeback"
+
+#mkfs_mount_umount $lv1
+
+#lvconvert --splitcache $vg/$lv1
+#check lv_field $vg/$lv1 segtype linear
+#check lv_field $vg/$lv2 segtype linear
+#mount_umount $lv1
+
+
+#
+# Test --cachepolicy
+#
+
+lvconvert -y --type cache --cachepool $lv2 --cachepolicy smq $vg/$lv1
+
+check lv_field $vg/$lv1 cachepolicy "smq"
+
+mkfs_mount_umount $lv1
+
+# FIXME: lvchange_cachepolicy sets wrong lv
+#lvchange --cachepolicy cleaner $vg/$lv1
+#lvchange -ay $vg/$lv1
+#check lv_field $vg/$lv1 cachepolicy "cleaner"
+#lvchange -an $vg/$lv1
+
+lvconvert --splitcache $vg/$lv1
+check lv_field $vg/$lv1 segtype linear
+check lv_field $vg/$lv2 segtype linear
+mount_umount $lv1
+
+
+#
+# Test --cachesettings
+# (only for mq policy, no settings for smq)
+#
+
+lvconvert -y --type cache --cachepool $lv2 --cachemode writethrough --cachepolicy mq --cachesettings 'migration_threshold = 233 sequential_threshold=13 random_threshold =1' $vg/$lv1
+
+check lv_field $vg/$lv1 cachemode "writethrough"
+check lv_field $vg/$lv1 cachepolicy "mq"
+
+lvs -o cachesettings $vg/$lv1 > settings
+grep "migration_threshold=233" settings
+grep "sequential_threshold=13" settings
+grep "random_threshold=1" settings
+
+mkfs_mount_umount $lv1
+
+lvconvert --splitcache $vg/$lv1
+check lv_field $vg/$lv1 segtype linear
+check lv_field $vg/$lv2 segtype linear
+mount_umount $lv1
+
+
+#
+# Test lvchange of --cachemode, --cachepolicy, --cachesettings
+#
+
+lvconvert -y --type cache --cachepool $lv2 $vg/$lv1
+
+lvchange -ay $vg/$lv1
+
+lvchange --cachemode writeback $vg/$lv1
+
+check lv_field $vg/$lv1 cachemode "writeback"
+
+lvchange --cachemode writethrough $vg/$lv1
+
+check lv_field $vg/$lv1 cachemode "writethrough"
+
+lvchange -an $vg/$lv1
+
+lvchange --cachepolicy mq --cachesettings 'migration_threshold=100' $vg/$lv1
+
+check lv_field $vg/$lv1 cachepolicy "mq"
+check lv_field $vg/$lv1 cachesettings "migration_threshold=100"
+
+lvconvert --splitcache $vg/$lv1
+
+
+#
+# Test --poolmetadata
+#
+
+# causes a cache-pool type LV to be created
+lvconvert -y --type cache --cachepool $lv3 --poolmetadata $lv4 $vg/$lv5
+
+lvs -a -o+segtype $vg
+
+check lv_field $vg/$lv5 segtype cache
+
+# check lv_field doesn't work for hidden lvs
+lvs -a -o segtype $vg/$lv3 > segtype
+grep cache-pool segtype
+
+lvconvert --splitcache $vg/$lv5
+check lv_field $vg/$lv5 segtype linear
+check lv_field $vg/$lv3 segtype cache-pool
+
+
+vgremove -ff $vg
+
diff --git a/test/shell/cache-single-thin.sh b/test/shell/cache-single-thin.sh
new file mode 100644
index 0000000..25c232f
--- /dev/null
+++ b/test/shell/cache-single-thin.sh
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2017 Red Hat, Inc. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v.2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# Test single lv cache
+
+SKIP_WITH_LVMPOLLD=1
+
+. lib/inittest
+
+aux prepare_devs 5 80
+
+vgcreate $SHARED $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5"
+
+# lv1 starts as a standard linear LV
+# lv1 is then sped up by attaching fast device lv2 using dm-cache
+# lv1 is then used as the data device in a thin pool
+
+lvcreate -L10 -an -n $lv1 $vg "$dev1"
+lvcreate -L10 -an -n $lv2 $vg "$dev2"
+
+lvconvert -y --type cache --cachepool $lv2 $vg/$lv1
+lvconvert -y --type thin-pool $vg/$lv1
+
+lvcreate --type thin -V10 -n lvthin --thinpool $vg/$lv1
+
+lvchange -an $vg/lvthin
+lvchange -an $vg/$lv1
+
+# detach the cache (lv2) from lv1
+
+lvconvert --splitcache $vg/$lv1
+
+vgremove -ff $vg
+
diff --git a/test/shell/cache-single-types.sh b/test/shell/cache-single-types.sh
new file mode 100644
index 0000000..472970a
--- /dev/null
+++ b/test/shell/cache-single-types.sh
@@ -0,0 +1,88 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2017 Red Hat, Inc. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v.2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# Test single lv cache with non-linear lvs
+
+SKIP_WITH_LVMPOLLD=1
+
+. lib/inittest
+
+mount_dir="mnt"
+mkdir -p $mount_dir
+
+# generate random data
+dmesg > pattern1
+ps aux >> pattern1
+
+aux prepare_devs 4 64
+
+vgcreate $SHARED $vg "$dev1" "$dev2" "$dev3" "$dev4"
+
+lvcreate --type raid1 -n $lv1 -l 8 -an $vg "$dev1" "$dev2"
+
+lvcreate --type raid1 -n $lv2 -l 4 -an $vg "$dev3" "$dev4"
+
+# test1: create fs on LV before cache is attached
+
+lvchange -ay $vg/$lv1
+
+mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1"
+
+mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir
+
+cp pattern1 $mount_dir/pattern1
+
+umount $mount_dir
+lvchange -an $vg/$lv1
+
+lvconvert -y --type cache --cachepool $lv2 $vg/$lv1
+
+check lv_field $vg/$lv1 segtype cache
+
+lvs -a $vg/$lv2 --noheadings -o segtype >out
+grep raid1 out
+
+lvchange -ay $vg/$lv1
+
+mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir
+
+diff pattern1 $mount_dir/pattern1
+
+cp pattern1 $mount_dir/pattern1b
+
+ls -l $mount_dir
+
+umount $mount_dir
+
+lvchange -an $vg/$lv1
+
+lvconvert --splitcache $vg/$lv1
+
+check lv_field $vg/$lv1 segtype raid1
+check lv_field $vg/$lv2 segtype raid1
+
+lvchange -ay $vg/$lv1
+lvchange -ay $vg/$lv2
+
+mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir
+
+ls -l $mount_dir
+
+diff pattern1 $mount_dir/pattern1
+diff pattern1 $mount_dir/pattern1b
+
+umount $mount_dir
+lvchange -an $vg/$lv1
+lvchange -an $vg/$lv2
+
+vgremove -ff $vg
+
diff --git a/test/shell/cache-single-usage.sh b/test/shell/cache-single-usage.sh
new file mode 100644
index 0000000..9636932
--- /dev/null
+++ b/test/shell/cache-single-usage.sh
@@ -0,0 +1,129 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2017 Red Hat, Inc. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v.2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# Test single lv cache
+
+SKIP_WITH_LVMPOLLD=1
+
+. lib/inittest
+
+mount_dir="mnt"
+mkdir -p $mount_dir
+
+# generate random data
+dmesg > pattern1
+ps aux >> pattern1
+
+aux prepare_devs 2 64
+
+vgcreate $SHARED $vg "$dev1"
+
+vgextend $vg "$dev2"
+
+lvcreate -n $lv1 -l 8 -an $vg "$dev1"
+
+lvcreate -n $lv2 -l 4 -an $vg "$dev2"
+
+# test1: create fs on LV before cache is attached
+
+lvchange -ay $vg/$lv1
+
+mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1"
+
+mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir
+
+cp pattern1 $mount_dir/pattern1
+
+umount $mount_dir
+lvchange -an $vg/$lv1
+
+lvconvert -y --type cache --cachepool $lv2 $vg/$lv1
+
+check lv_field $vg/$lv1 segtype cache
+
+lvs -a $vg/$lv2 --noheadings -o segtype >out
+grep linear out
+
+lvchange -ay $vg/$lv1
+
+mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir
+
+diff pattern1 $mount_dir/pattern1
+
+cp pattern1 $mount_dir/pattern1b
+
+ls -l $mount_dir
+
+umount $mount_dir
+
+lvchange -an $vg/$lv1
+
+lvconvert --splitcache $vg/$lv1
+
+check lv_field $vg/$lv1 segtype linear
+check lv_field $vg/$lv2 segtype linear
+
+lvchange -ay $vg/$lv1
+lvchange -ay $vg/$lv2
+
+mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir
+
+ls -l $mount_dir
+
+diff pattern1 $mount_dir/pattern1
+diff pattern1 $mount_dir/pattern1b
+
+umount $mount_dir
+lvchange -an $vg/$lv1
+lvchange -an $vg/$lv2
+
+# test2: create fs on LV after cache is attached
+
+lvconvert -y --type cache --cachepool $lv2 $vg/$lv1
+
+check lv_field $vg/$lv1 segtype cache
+
+lvs -a $vg/$lv2 --noheadings -o segtype >out
+grep linear out
+
+lvchange -ay $vg/$lv1
+
+mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1"
+
+mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir
+
+cp pattern1 $mount_dir/pattern1
+ls -l $mount_dir
+
+umount $mount_dir
+lvchange -an $vg/$lv1
+
+lvconvert --splitcache $vg/$lv1
+
+check lv_field $vg/$lv1 segtype linear
+check lv_field $vg/$lv2 segtype linear
+
+lvchange -ay $vg/$lv1
+lvchange -ay $vg/$lv2
+
+mount "$DM_DEV_DIR/$vg/$lv1" $mount_dir
+
+ls -l $mount_dir
+
+diff pattern1 $mount_dir/pattern1
+
+umount $mount_dir
+lvchange -an $vg/$lv1
+lvchange -an $vg/$lv2
+
+vgremove -ff $vg
+
diff --git a/test/shell/lvconvert-cache-raid.sh b/test/shell/lvconvert-cache-raid.sh
index fa49163..465def7 100644
--- a/test/shell/lvconvert-cache-raid.sh
+++ b/test/shell/lvconvert-cache-raid.sh
@@ -84,22 +84,31 @@ lvremove -f $vg
# Test up/down raid conversion of cache pool data and metadata
-lvcreate --type cache-pool $vg/cpool -l 10
-lvcreate -H -n corigin --cachepool $vg/cpool -l 20 $vg
-
-lvconvert -y -m +1 --type raid1 $vg/cpool_cmeta
-check lv_field $vg/cpool_cmeta layout "raid,raid1"
-check lv_field $vg/cpool_cmeta role "private,cache,pool,metadata"
-
-lvconvert -y -m +1 --type raid1 $vg/cpool_cdata
-check lv_field $vg/cpool_cdata layout "raid,raid1"
-check lv_field $vg/cpool_cdata role "private,cache,pool,data"
-
-not lvconvert -m -1 $vg/cpool_cmeta
-lvconvert -y -m -1 $vg/cpool_cmeta
-check lv_field $vg/cpool_cmeta layout "linear"
-lvconvert -y -m -1 $vg/cpool_cdata
-check lv_field $vg/cpool_cdata layout "linear"
+
+lvcreate -l 10 -n cp1 $vg
+lvconvert -y --type cache-pool $vg/cp1
+
+lvcreate -l 20 -n co1 $vg
+lvconvert -y --type cache --cachepool cp1 $vg/co1
+
+lvconvert -y -m +1 --type raid1 $vg/cp1_cmeta
+check lv_field $vg/cp1_cmeta layout "raid,raid1"
+check lv_field $vg/cp1_cmeta role "private,cache,pool,metadata"
+
+lvconvert -y -m +1 --type raid1 $vg/cp1_cdata
+check lv_field $vg/cp1_cdata layout "raid,raid1"
+check lv_field $vg/cp1_cdata role "private,cache,pool,data"
+
+sleep 5
+
+lvs -a -o+devices $vg
+
+not lvconvert -m -1 $vg/cp1_cmeta
+
+lvconvert -y -m -1 $vg/cp1_cmeta
+check lv_field $vg/cp1_cmeta layout "linear"
+lvconvert -y -m -1 $vg/cp1_cdata
+check lv_field $vg/cp1_cdata layout "linear"
lvremove -f $vg
diff --git a/test/shell/lvconvert-cache.sh b/test/shell/lvconvert-cache.sh
index b2a2920..0c38dd0 100644
--- a/test/shell/lvconvert-cache.sh
+++ b/test/shell/lvconvert-cache.sh
@@ -104,6 +104,7 @@ lvcreate -n pool -l 10 $vg
lvs -a -o +devices
fail lvconvert --type cache --cachepool $vg/pool $vg/corigin
lvconvert --yes --cache --cachepool $vg/pool $vg/corigin
+lvconvert --splitcache $vg/corigin
lvremove -ff $vg
# Check we also support conversion that uses 'cleaner' cache policy
diff --git a/test/shell/lvrename-cache-thin.sh b/test/shell/lvrename-cache-thin.sh
index 8e9bd78..0697d1c 100644
--- a/test/shell/lvrename-cache-thin.sh
+++ b/test/shell/lvrename-cache-thin.sh
@@ -26,6 +26,8 @@ lvcreate -L10 -n cpool $vg
lvcreate -L10 -n tpool $vg
lvcreate -L10 -n $lv1 $vg
+lvconvert --yes --type cache-pool $vg/cpool
+
lvconvert --yes --cache --cachepool cpool $vg/tpool
# currently the only allowed stacking is cache thin data volume
diff --git a/tools/lvchange.c b/tools/lvchange.c
index 52b3bda..537e582 100644
--- a/tools/lvchange.c
+++ b/tools/lvchange.c
@@ -643,7 +643,10 @@ static int _lvchange_cache(struct cmd_context *cmd,
seg = first_seg(lv);
- if (seg_is_cache_pool(seg))
+ if (seg_is_cache(seg) && lv_is_cache_single(seg->pool_lv))
+ setting_seg = seg;
+
+ else if (seg_is_cache_pool(seg))
setting_seg = seg;
else if (seg_is_cache(seg))
diff --git a/tools/lvconvert.c b/tools/lvconvert.c
index dbc5ab0..7382ce0 100644
--- a/tools/lvconvert.c
+++ b/tools/lvconvert.c
@@ -1844,14 +1844,20 @@ static int _lvconvert_split_and_keep_cachepool(struct cmd_context *cmd,
struct logical_volume *lv,
struct logical_volume *cachepool_lv)
{
- log_debug("Detaching cache pool %s from cache LV %s.",
- display_lvname(cachepool_lv), display_lvname(lv));
+ struct lv_segment *cache_seg = first_seg(lv);
+
+ log_debug("Detaching cache %s from LV %s.", display_lvname(cachepool_lv), display_lvname(lv));
if (!archive(lv->vg))
return_0;
- if (!lv_cache_remove(lv))
- return_0;
+ if (lv_is_cache_single(cache_seg->pool_lv)) {
+ if (!lv_detach_cache_single(lv))
+ return_0;
+ } else {
+ if (!lv_cache_remove(lv))
+ return_0;
+ }
if (!vg_write(lv->vg) || !vg_commit(lv->vg))
return_0;
@@ -2429,6 +2435,11 @@ static int _lvconvert_cache_repair(struct cmd_context *cmd,
struct logical_volume *pmslv;
struct logical_volume *mlv;
+ if (lv_is_cache(cache_lv) && lv_is_cache_single(first_seg(cache_lv)->pool_lv)) {
+ log_error("Manual repair required.");
+ return_0;
+ }
+
pool_lv = lv_is_cache_pool(cache_lv) ? cache_lv : first_seg(cache_lv)->pool_lv;
mlv = first_seg(pool_lv)->metadata_lv;
@@ -3357,25 +3368,100 @@ revert_new_lv:
#endif
}
-static int _lvconvert_to_cache_vol(struct cmd_context *cmd,
- struct logical_volume *lv,
- struct logical_volume *cachepool_lv)
+static int _cache_single_attach(struct cmd_context *cmd,
+ struct logical_volume *lv,
+ struct logical_volume *lv_fast)
{
+ struct volume_group *vg = lv->vg;
struct logical_volume *cache_lv;
uint32_t chunk_size = 0;
+ uint64_t poolmetadatasize = 0;
cache_metadata_format_t cache_metadata_format;
cache_mode_t cache_mode;
const char *policy_name;
struct dm_config_tree *policy_settings = NULL;
+ char *lockd_fast_args = NULL;
+ char *lockd_fast_name = NULL;
+ struct id lockd_fast_id;
int r = 0;
- if (_raid_split_image_conversion(lv))
- return 0;
-
- /* If LV is inactive here, ensure it's not active elsewhere. */
- if (!lockd_lv(cmd, lv, "ex", 0))
+ if (!validate_lv_cache_create_pool(lv_fast))
return_0;
+ if (!get_cache_params(cmd, &chunk_size, &cache_metadata_format, &cache_mode, &policy_name, &policy_settings))
+ goto_out;
+
+ if (!archive(vg))
+ goto_out;
+
+ /*
+ * Changes the vg struct to match the desired state.
+ *
+ * - lv == cache_lv, which keeps existing lv name and id, gets new
+ * segment with segtype "cache".
+ *
+ * - lv_fast keeps its existing name and id, becomes hidden.
+ *
+ * - lv_corig gets new name (existing name + _corig suffix),
+ * gets new id, becomes hidden, gets segments from lv.
+ */
+
+ if (!(cache_lv = lv_cache_create(lv_fast, lv)))
+ goto_out;
+
+ if (arg_is_set(cmd, poolmetadatasize_ARG))
+ poolmetadatasize = arg_uint64_value(cmd, poolmetadatasize_ARG, 0);
+
+ if (!cache_single_set_params(cmd, cache_lv, lv_fast, poolmetadatasize, chunk_size, cache_metadata_format, cache_mode, policy_name, policy_settings))
+ goto_out;
+
+ /*
+ * lv/cache_lv keeps the same lockd lock it had before, the lock for
+ * lv_fast is freed, and lv_corig has no lock.
+ */
+
+ if (vg_is_shared(vg) && lv_fast->lock_args) {
+ lockd_fast_args = dm_pool_strdup(cmd->mem, lv_fast->lock_args);
+ lockd_fast_name = dm_pool_strdup(cmd->mem, lv_fast->name);
+ memcpy(&lockd_fast_id, &lv_fast->lvid.id[1], sizeof(struct id));
+ lv_fast->lock_args = NULL;
+ }
+
+ /*
+ * vg_write(), suspend_lv(), vg_commit(), resume_lv(),
+ * where the old LV is suspended and the new LV is resumed.
+ */
+
+ if (!lv_update_and_reload(cache_lv))
+ goto_out;
+
+ if (lockd_fast_name) {
+ /* unlock and free lockd lock for lv_fast */
+ if (!lockd_lv_name(cmd, vg, lockd_fast_name, &lockd_fast_id, lockd_fast_args, "un", LDLV_PERSISTENT))
+ log_error("Failed to unlock fast LV %s/%s", vg->name, lockd_fast_name);
+ lockd_free_lv(cmd, vg, lockd_fast_name, &lockd_fast_id, lockd_fast_args);
+ }
+
+ r = 1;
+out:
+ if (policy_settings)
+ dm_config_destroy(policy_settings);
+
+ return r;
+}
+
+static int _cache_pool_attach(struct cmd_context *cmd,
+ struct logical_volume *lv,
+ struct logical_volume *cachepool_lv)
+{
+ struct logical_volume *cache_lv;
+ uint32_t chunk_size = 0;
+ cache_metadata_format_t cache_metadata_format;
+ cache_mode_t cache_mode;
+ const char *policy_name;
+ struct dm_config_tree *policy_settings = NULL;
+ int r = 0;
+
if (!validate_lv_cache_create_pool(cachepool_lv))
return_0;
@@ -3394,8 +3480,6 @@ static int _lvconvert_to_cache_vol(struct cmd_context *cmd,
if (!lv_update_and_reload(cache_lv))
goto_bad;
- log_print_unless_silent("Logical volume %s is now cached.",
- display_lvname(cache_lv));
r = 1;
bad:
if (policy_settings)
@@ -4018,9 +4102,9 @@ int lvconvert_to_pool_cmd(struct cmd_context *cmd, int argc, char **argv)
NULL, NULL, &_lvconvert_to_pool_single);
}
-static int _lvconvert_to_cache_vol_single(struct cmd_context *cmd,
- struct logical_volume *lv,
- struct processing_handle *handle)
+static int _lvconvert_cache_attach_single(struct cmd_context *cmd,
+ struct logical_volume *lv,
+ struct processing_handle *handle)
{
struct volume_group *vg = lv->vg;
struct logical_volume *cachepool_lv;
@@ -4037,12 +4121,16 @@ static int _lvconvert_to_cache_vol_single(struct cmd_context *cmd,
goto out;
}
+ /* Ensure the LV is not active elsewhere. */
+ if (!lockd_lv(cmd, lv, "ex", 0))
+ goto_out;
+
/*
* If cachepool_lv is not yet a cache pool, convert it to one.
* If using an existing cache pool, wipe it.
*/
- if (!lv_is_cache_pool(cachepool_lv)) {
+ if (!lv_is_cache_pool(cachepool_lv) && arg_is_set(cmd, poolmetadata_ARG)) {
int lvt_enum = get_lvt_enum(cachepool_lv);
struct lv_type *lvtype = get_lv_type(lvt_enum);
@@ -4073,6 +4161,28 @@ static int _lvconvert_to_cache_vol_single(struct cmd_context *cmd,
log_error("LV %s is not a cache pool.", display_lvname(cachepool_lv));
goto out;
}
+
+ } else if (!lv_is_cache_pool(cachepool_lv)) {
+
+ if (!dm_list_empty(&cachepool_lv->segs_using_this_lv)) {
+ log_error("LV %s is already in use.", display_lvname(cachepool_lv));
+ goto out;
+ }
+
+ if (!arg_is_set(cmd, yes_ARG) &&
+ yes_no_prompt("Erase all existing data on %s? [y/n]: ", display_lvname(cachepool_lv)) == 'n') {
+ log_error("Conversion aborted.");
+ goto out;
+ }
+
+ /* Ensure the LV is not active elsewhere. */
+ if (!lockd_lv(cmd, cachepool_lv, "ex", LDLV_PERSISTENT))
+ goto_out;
+
+ cachepool_lv->status |= LV_CACHE_SINGLE;
+
+ if (!wipe_cache_pool(cachepool_lv))
+ goto_out;
} else {
if (!dm_list_empty(&cachepool_lv->segs_using_this_lv)) {
log_error("Cache pool %s is already in use.", cachepool_name);
@@ -4108,10 +4218,25 @@ static int _lvconvert_to_cache_vol_single(struct cmd_context *cmd,
log_verbose("Redirecting operation to data sub LV %s.", display_lvname(lv));
}
- /* Convert lv to cache vol using cachepool_lv. */
+ if (_raid_split_image_conversion(lv))
+ goto_out;
+
+ /* Attach the cache to the main LV. */
+
+ if (lv_is_cache_single(cachepool_lv)) {
+ if (!_cache_single_attach(cmd, lv, cachepool_lv))
+ goto_out;
+
+ } else if (lv_is_cache_pool(cachepool_lv)) {
+ if (!_cache_pool_attach(cmd, lv, cachepool_lv))
+ goto_out;
- if (!_lvconvert_to_cache_vol(cmd, lv, cachepool_lv))
+ } else {
+ log_error(INTERNAL_ERROR "Invalid cache pool state for %s", cachepool_lv->name);
goto_out;
+ }
+
+ log_print_unless_silent("Logical volume %s is now cached.", display_lvname(lv));
return ECMD_PROCESSED;
@@ -4122,7 +4247,7 @@ static int _lvconvert_to_cache_vol_single(struct cmd_context *cmd,
int lvconvert_to_cache_vol_cmd(struct cmd_context *cmd, int argc, char **argv)
{
return process_each_lv(cmd, 1, cmd->position_argv, NULL, NULL, READ_FOR_UPDATE,
- NULL, NULL, &_lvconvert_to_cache_vol_single);
+ NULL, NULL, &_lvconvert_cache_attach_single);
}
static int _lvconvert_to_thin_with_external_single(struct cmd_context *cmd,
@@ -4415,6 +4540,13 @@ static int _lvconvert_split_cachepool_single(struct cmd_context *cmd,
return ECMD_FAILED;
}
+ if ((cmd->command->command_enum == lvconvert_split_and_remove_cachepool_CMD) &&
+ lv_is_cache_single(cachepool_lv)) {
+ log_error("Detach cache from %s with --splitcache.", display_lvname(lv));
+ log_error("The cache %s may then be removed with lvremove.", display_lvname(cachepool_lv));
+ return 0;
+ }
+
/* If LV is inactive here, ensure it's not active elsewhere. */
if (!lockd_lv(cmd, cache_lv, "ex", 0))
return_0;
diff --git a/tools/vgsplit.c b/tools/vgsplit.c
index 5824c82..fc99d2e 100644
--- a/tools/vgsplit.c
+++ b/tools/vgsplit.c
@@ -402,7 +402,10 @@ static int _move_cache(struct volume_group *vg_from,
/* NOTREACHED */
- if (lv_is_cache(lv)) {
+ if (lv_is_cache(lv) && lv_is_cache_single(seg->pool_lv)) {
+ log_error("Cannot split while LV %s has cache attached.", display_lvname(lv));
+ return 0;
+ } else if (lv_is_cache(lv)) {
orig = seg_lv(seg, 0);
data = seg_lv(first_seg(seg->pool_lv), 0);
meta = first_seg(seg->pool_lv)->metadata_lv;
5 years, 5 months
master - cache: reorganize cache_set_policy
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=a686391eca557fc35eb90c1ef4cdc57418b6ee19
Commit: a686391eca557fc35eb90c1ef4cdc57418b6ee19
Parent: 23948e99b3d723f09456f75c93655817f87f1a82
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Mon Nov 5 16:14:45 2018 -0600
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Tue Nov 6 11:36:29 2018 -0600
cache: reorganize cache_set_policy
to prepare for future addition
---
lib/metadata/cache_manip.c | 21 ++++++++++++++-------
1 files changed, 14 insertions(+), 7 deletions(-)
diff --git a/lib/metadata/cache_manip.c b/lib/metadata/cache_manip.c
index 66f759f..8e8e704 100644
--- a/lib/metadata/cache_manip.c
+++ b/lib/metadata/cache_manip.c
@@ -748,23 +748,30 @@ static cache_metadata_format_t _get_default_cache_metadata_format(struct cmd_con
return f;
}
-int cache_set_policy(struct lv_segment *seg, const char *name,
+int cache_set_policy(struct lv_segment *lvseg, const char *name,
const struct dm_config_tree *settings)
{
+ struct lv_segment *seg;
struct dm_config_node *cn;
const struct dm_config_node *cns;
struct dm_config_tree *old = NULL, *new = NULL, *tmp = NULL;
int r = 0;
- struct profile *profile = seg->lv->profile;
+ struct profile *profile = lvseg->lv->profile;
- if (seg_is_cache(seg))
- seg = first_seg(seg->pool_lv);
- else if (seg_is_cache_pool(seg)) {
+ if (seg_is_cache_pool(lvseg)) {
if (!name && !settings)
return 1; /* Policy and settings can be selected later when caching LV */
- } else {
+ }
+
+ if (seg_is_cache_pool(lvseg))
+ seg = lvseg;
+
+ else if (seg_is_cache(lvseg))
+ seg = first_seg(lvseg->pool_lv);
+
+ else {
log_error(INTERNAL_ERROR "Cannot set cache metadata format for non cache volume %s.",
- display_lvname(seg->lv));
+ display_lvname(lvseg->lv));
return 0;
}
5 years, 5 months
master - cache: factor lvchange_cache
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=8c9d9a744605e37799a2475932ae7dfd43831d08
Commit: 8c9d9a744605e37799a2475932ae7dfd43831d08
Parent: e548e7c29d18034d153dc2ebe4ff0cc6bb99dc81
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Mon Nov 5 16:38:08 2018 -0600
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Tue Nov 6 11:36:34 2018 -0600
cache: factor lvchange_cache
to prepare for future addition
---
tools/lvchange.c | 20 ++++++++++++++------
1 files changed, 14 insertions(+), 6 deletions(-)
diff --git a/tools/lvchange.c b/tools/lvchange.c
index 07a578b..52b3bda 100644
--- a/tools/lvchange.c
+++ b/tools/lvchange.c
@@ -636,18 +636,26 @@ static int _lvchange_cache(struct cmd_context *cmd,
cache_mode_t mode;
const char *name;
struct dm_config_tree *settings = NULL;
- struct lv_segment *pool_seg = first_seg(lv);
+ struct lv_segment *seg;
+ struct lv_segment *setting_seg = NULL;
int r = 0, is_clean;
uint32_t chunk_size = 0; /* FYI: lvchange does NOT support its change */
- if (lv_is_cache(lv))
- pool_seg = first_seg(pool_seg->pool_lv);
+ seg = first_seg(lv);
+
+ if (seg_is_cache_pool(seg))
+ setting_seg = seg;
+
+ else if (seg_is_cache(seg))
+ setting_seg = first_seg(seg->pool_lv);
+ else
+ goto_out;
if (!get_cache_params(cmd, &chunk_size, &format, &mode, &name, &settings))
goto_out;
if ((mode != CACHE_MODE_UNSELECTED) &&
- (mode != pool_seg->cache_mode) &&
+ (mode != setting_seg->cache_mode) &&
lv_is_cache(lv)) {
if (!lv_cache_wait_for_clean(lv, &is_clean))
return_0;
@@ -658,11 +666,11 @@ static int _lvchange_cache(struct cmd_context *cmd,
}
}
- if (mode && !cache_set_cache_mode(first_seg(lv), mode))
+ if (mode && !cache_set_cache_mode(seg, mode))
goto_out;
if ((name || settings) &&
- !cache_set_policy(first_seg(lv), name, settings))
+ !cache_set_policy(seg, name, settings))
goto_out;
/* Request caller to commit and reload metadata */
5 years, 5 months
master - cache: factor report functions
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=e548e7c29d18034d153dc2ebe4ff0cc6bb99dc81
Commit: e548e7c29d18034d153dc2ebe4ff0cc6bb99dc81
Parent: a686391eca557fc35eb90c1ef4cdc57418b6ee19
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Mon Nov 5 16:33:34 2018 -0600
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Tue Nov 6 11:36:29 2018 -0600
cache: factor report functions
to prepare for future addition
---
lib/report/report.c | 65 ++++++++++++++++++++++++++------------------------
1 files changed, 34 insertions(+), 31 deletions(-)
diff --git a/lib/report/report.c b/lib/report/report.c
index 369f47c..52baa6c 100644
--- a/lib/report/report.c
+++ b/lib/report/report.c
@@ -1424,24 +1424,19 @@ static int _cache_settings_disp(struct dm_report *rh, struct dm_pool *mem,
const void *data, void *private)
{
const struct lv_segment *seg = (const struct lv_segment *) data;
+ const struct lv_segment *setting_seg = NULL;
const struct dm_config_node *settings;
struct dm_list *result;
struct _str_list_append_baton baton;
struct dm_list dummy_list; /* dummy list to display "nothing" */
- if (seg_is_cache(seg))
- seg = first_seg(seg->pool_lv);
- else if (!seg_is_cache_pool(seg)) {
- dm_list_init(&dummy_list);
- return _field_set_string_list(rh, field, &dummy_list, private, 0, NULL);
- /* TODO: once we have support for STR_LIST reserved values, replace with:
- * return _field_set_value(field, GET_FIRST_RESERVED_NAME(cache_settings_undef), GET_FIELD_RESERVED_VALUE(cache_settings_undef));
- */
- }
+ if (seg_is_cache_pool(seg))
+ setting_seg = seg;
+
+ else if (seg_is_cache(seg))
+ setting_seg = first_seg(seg->pool_lv);
- if (seg->policy_settings)
- settings = seg->policy_settings->child;
- else {
+ if (!setting_seg || !setting_seg->policy_settings) {
dm_list_init(&dummy_list);
return _field_set_string_list(rh, field, &dummy_list, private, 0, NULL);
/* TODO: once we have support for STR_LIST reserved values, replace with:
@@ -1449,6 +1444,8 @@ static int _cache_settings_disp(struct dm_report *rh, struct dm_pool *mem,
*/
}
+ settings = setting_seg->policy_settings->child;
+
if (!(result = str_list_create(mem)))
return_0;
@@ -1566,19 +1563,19 @@ static int _cache_policy_disp(struct dm_report *rh, struct dm_pool *mem,
const void *data, void *private)
{
const struct lv_segment *seg = (const struct lv_segment *) data;
+ const struct lv_segment *setting_seg = NULL;
- if (seg_is_cache(seg))
- seg = first_seg(seg->pool_lv);
- else if (!seg_is_cache_pool(seg) || !seg->policy_name)
+ if (seg_is_cache_pool(seg))
+ setting_seg = seg;
+
+ else if (seg_is_cache(seg))
+ setting_seg = first_seg(seg->pool_lv);
+
+ if (!setting_seg || !setting_seg->policy_name)
return _field_set_value(field, GET_FIRST_RESERVED_NAME(cache_policy_undef),
GET_FIELD_RESERVED_VALUE(cache_policy_undef));
- if (!seg->policy_name) {
- log_error(INTERNAL_ERROR "Unexpected NULL policy name.");
- return 0;
- }
-
- return _field_string(rh, field, seg->policy_name);
+ return _field_string(rh, field, setting_seg->policy_name);
}
static int _modules_disp(struct dm_report *rh, struct dm_pool *mem,
@@ -2747,21 +2744,27 @@ static int _cachemetadataformat_disp(struct dm_report *rh, struct dm_pool *mem,
const void *data, void *private)
{
const struct lv_segment *seg = (const struct lv_segment *) data;
+ const struct lv_segment *setting_seg = NULL;
const uint64_t *fmt;
- if (seg_is_cache(seg))
- seg = first_seg(seg->pool_lv);
+ if (seg_is_cache_pool(seg))
+ setting_seg = seg;
- if (seg_is_cache_pool(seg)) {
- switch (seg->cache_metadata_format) {
- case CACHE_METADATA_FORMAT_1:
- case CACHE_METADATA_FORMAT_2:
- fmt = (seg->cache_metadata_format == CACHE_METADATA_FORMAT_2) ? &_two64 : &_one64;
- return dm_report_field_uint64(rh, field, fmt);
- default: /* unselected/undefined for all other cases */;
- }
+ else if (seg_is_cache(seg))
+ setting_seg = first_seg(seg->pool_lv);
+
+ else
+ goto undef;
+
+ switch (setting_seg->cache_metadata_format) {
+ case CACHE_METADATA_FORMAT_1:
+ case CACHE_METADATA_FORMAT_2:
+ fmt = (setting_seg->cache_metadata_format == CACHE_METADATA_FORMAT_2) ? &_two64 : &_one64;
+ return dm_report_field_uint64(rh, field, fmt);
+ default: /* unselected/undefined for all other cases */;
}
+ undef:
return _field_set_value(field, "", &GET_TYPE_RESERVED_VALUE(num_undef_64));
}
5 years, 5 months
master - cache: improve error message about flush
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=23948e99b3d723f0945...
Commit: 23948e99b3d723f09456f75c93655817f87f1a82
Parent: 3e547fa9528e119205fcf182aecccd49c3695547
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Mon Nov 5 16:10:49 2018 -0600
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Tue Nov 6 11:36:29 2018 -0600
cache: improve error message about flush
---
lib/metadata/cache_manip.c | 3 +--
1 files changed, 1 insertions(+), 2 deletions(-)
diff --git a/lib/metadata/cache_manip.c b/lib/metadata/cache_manip.c
index 6b87873..66f759f 100644
--- a/lib/metadata/cache_manip.c
+++ b/lib/metadata/cache_manip.c
@@ -608,8 +608,7 @@ int lv_cache_remove(struct logical_volume *cache_lv)
cache_lv->status |= LV_TEMPORARY;
if (!activate_lv(cache_lv->vg->cmd, cache_lv) ||
!lv_is_active(cache_lv)) {
- log_error("Failed to active cache locally %s.",
- display_lvname(cache_lv));
+ log_error("Failed to activate %s to flush cache.", display_lvname(cache_lv));
return 0;
}
cache_lv->status &= ~LV_TEMPORARY;
5 years, 5 months
master - cache: improve warning message about cached thin data
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=3e547fa9528e119205f...
Commit: 3e547fa9528e119205fcf182aecccd49c3695547
Parent: 5ee1727f808e76f59637d78f2691aadc25d49b17
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Mon Nov 5 16:08:31 2018 -0600
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Tue Nov 6 11:36:28 2018 -0600
cache: improve warning message about cached thin data
---
lib/metadata/cache_manip.c | 7 ++++---
1 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/lib/metadata/cache_manip.c b/lib/metadata/cache_manip.c
index b4d269d..6b87873 100644
--- a/lib/metadata/cache_manip.c
+++ b/lib/metadata/cache_manip.c
@@ -171,9 +171,10 @@ void cache_check_for_warns(const struct lv_segment *seg)
log_warn("WARNING: Data redundancy could be lost with writeback "
"caching of raid logical volume!");
- if (lv_is_thin_pool_data(seg->lv))
- log_warn("WARNING: Cached thin pool's data cannot be currently "
- "resized and require manual uncache before resize!");
+ if (lv_is_thin_pool_data(seg->lv)) {
+ log_warn("WARNING: thin pool data will not be automatically extended when cached.");
+ log_warn("WARNING: manual splitcache is required before extending thin pool data.");
+ }
}
/*
5 years, 5 months