master - build: make generate
by Marian Csontos
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=205fb35b50a14f10b95...
Commit: 205fb35b50a14f10b95bb4a47935010efe6d96e8
Parent: 10a095a58b3c564d54ded4d59c2286546fc83b74
Author: Marian Csontos <mcsontos(a)redhat.com>
AuthorDate: Thu Nov 26 17:37:32 2020 +0100
Committer: Marian Csontos <mcsontos(a)redhat.com>
CommitterDate: Thu Nov 26 17:37:32 2020 +0100
build: make generate
---
conf/example.conf.in | 3 +--
man/vgsplit.8_pregen | 7 ++++---
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/conf/example.conf.in b/conf/example.conf.in
index 6b0622ab9..fe17942d9 100644
--- a/conf/example.conf.in
+++ b/conf/example.conf.in
@@ -937,8 +937,7 @@ global {
# a volume group's metadata, instead of always granting the read-only
# requests immediately, delay them to allow the read-write requests to
# be serviced. Without this setting, write access may be stalled by a
- # high volume of read-only requests. This option only affects
- # locking_type 1 viz. local file-based locking.
+ # high volume of read-only requests. This option only affects file locks.
prioritise_write_locks = 1
# Configuration option global/library_dir.
diff --git a/man/vgsplit.8_pregen b/man/vgsplit.8_pregen
index 3adb0510f..9731aa4fa 100644
--- a/man/vgsplit.8_pregen
+++ b/man/vgsplit.8_pregen
@@ -8,9 +8,10 @@ vgsplit - Move physical volumes into a new or existing volume group
[ \fIoption_args\fP ]
.br
.SH DESCRIPTION
-vgsplit moves one or more PVs from a source VG to a destination VG. The
-PVs can be specified explicitly or implicitly by naming an LV, in which
-case on PVs underlying the LV are moved.
+vgsplit moves one or more PVs from a source VG (the first VG arg) to a
+destination VG (the second VG arg). The PV(s) to move are named after the
+source and destination VGs, or an LV is named, in which case the PVs
+underlying the LV are moved.
If the destination VG does not exist, a new VG is created (command options
can be used to specify properties of the new VG, also see
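For reference, a minimal usage sketch of the two forms described above
(all VG, PV and LV names are illustrative):

  # move the named PVs from vg_src (first arg) to vg_dst (second arg)
  vgsplit vg_src vg_dst /dev/sdb1 /dev/sdc1

  # name an LV instead; the PVs underlying lvol0 are moved
  vgsplit -n lvol0 vg_src vg_dst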
main - udev rule: remove lvmetad comments
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=10a095a58b3c564d54d...
Commit: 10a095a58b3c564d54ded4d59c2286546fc83b74
Parent: b68141a49d94db7fa98beb345bc5a583d874e9e7
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Wed Nov 25 16:57:54 2020 -0600
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Wed Nov 25 16:57:54 2020 -0600
udev rule: remove lvmetad comments
---
udev/69-dm-lvm-metad.rules.in | 15 +++------------
1 file changed, 3 insertions(+), 12 deletions(-)
diff --git a/udev/69-dm-lvm-metad.rules.in b/udev/69-dm-lvm-metad.rules.in
index d51006496..78f506520 100644
--- a/udev/69-dm-lvm-metad.rules.in
+++ b/udev/69-dm-lvm-metad.rules.in
@@ -4,14 +4,7 @@
# Udev rules for LVM.
#
-# Scan all block devices having a PV label for LVM metadata.
-# Store this information in LVMetaD (the LVM metadata daemon) and maintain LVM
-# metadata state for improved performance by avoiding further scans while
-# running subsequent LVM commands or while using lvm2app library.
-# Also, notify LVMetaD about any relevant block device removal.
-#
-# This rule is essential for having the information in LVMetaD up-to-date.
-# It also requires blkid to be called on block devices before so only devices
+# This rule requires blkid to be called on block devices before so only devices
# used as LVM PVs are processed (ID_FS_TYPE="LVM2_member" or "LVM1_member").
SUBSYSTEM!="block", GOTO="lvm_end"
@@ -19,8 +12,7 @@ SUBSYSTEM!="block", GOTO="lvm_end"
ENV{DM_UDEV_DISABLE_OTHER_RULES_FLAG}=="1", GOTO="lvm_end"
-# If the PV label got lost, inform lvmetad immediately.
-# Detect the lost PV label by comparing previous ID_FS_TYPE value with current one.
+# Detect removed PV label by comparing previous ID_FS_TYPE value with current one.
ENV{.ID_FS_TYPE_NEW}="$env{ID_FS_TYPE}"
IMPORT{db}="ID_FS_TYPE"
ENV{ID_FS_TYPE}=="LVM2_member|LVM1_member", ENV{.ID_FS_TYPE_NEW}!="LVM2_member|LVM1_member", ENV{LVM_PV_GONE}="1"
@@ -31,7 +23,6 @@ ENV{LVM_PV_GONE}=="1", GOTO="lvm_scan"
ENV{ID_FS_TYPE}!="LVM2_member|LVM1_member", GOTO="lvm_end"
ENV{DM_MULTIPATH_DEVICE_PATH}=="1", GOTO="lvm_end"
-# Inform lvmetad about any PV that is gone.
ACTION=="remove", GOTO="lvm_scan"
# Create /dev/disk/by-id/lvm-pv-uuid-<PV_UUID> symlink for each PV
@@ -69,7 +60,6 @@ ENV{LVM_LOOP_PV_ACTIVATED}!="1", ENV{SYSTEMD_READY}="0"
GOTO="lvm_end"
# If the PV is not a special device listed above, scan only if necessary.
-# For "direct_pvscan" mode (see below), this means run rules only an ADD events.
# For "systemd_background" mode, systemd takes care of this by activating
# the lvm2-pvscan@.service only once.
LABEL="next"
@@ -113,6 +103,7 @@ ENV{SYSTEMD_ALIAS}="/dev/block/$major:$minor"
ENV{SYSTEMD_WANTS}+="lvm2-pvscan@$major:$minor.service"
GOTO="lvm_end"
+# FIXME: this mode is not used and should be removed.
LABEL="direct_pvscan"
# The table below summarises the situations in which we reach the LABEL="lvm_scan"
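As a side note, the ID_FS_TYPE value these rules key on can be inspected
directly with standard tools (the device path is illustrative):

  # what blkid detects on the device
  blkid -s TYPE /dev/sdb1

  # what udev has recorded for the device
  udevadm info --query=property --name=/dev/sdb1 | grep ID_FS_TYPE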
main - lvm.conf: remove reference to locking_type
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=b68141a49d94db7fa98...
Commit: b68141a49d94db7fa98beb345bc5a583d874e9e7
Parent: 9c0253d930103c33784fac5b303da20104a314b6
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Tue Nov 17 11:19:55 2020 -0600
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Tue Nov 17 11:19:55 2020 -0600
lvm.conf: remove reference to locking_type
---
lib/config/config_settings.h | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/lib/config/config_settings.h b/lib/config/config_settings.h
index e4e3dcdde..163e01485 100644
--- a/lib/config/config_settings.h
+++ b/lib/config/config_settings.h
@@ -982,8 +982,7 @@ cfg(global_prioritise_write_locks_CFG, "prioritise_write_locks", global_CFG_SECT
"a volume group's metadata, instead of always granting the read-only\n"
"requests immediately, delay them to allow the read-write requests to\n"
"be serviced. Without this setting, write access may be stalled by a\n"
- "high volume of read-only requests. This option only affects\n"
- "locking_type 1 viz. local file-based locking.\n")
+ "high volume of read-only requests. This option only affects file locks.\n")
cfg(global_library_dir_CFG, "library_dir", global_CFG_SECTION, CFG_DEFAULT_UNDEFINED, CFG_TYPE_STRING, NULL, vsn(1, 0, 0), NULL, 0, NULL,
"Search this directory first for shared libraries.\n")
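For reference, a minimal lvm.conf fragment using this setting with its
default value (the same fragment appears in the example.conf.in hunk of
the "build: make generate" commit above):

  global {
      # Delay read-only lock requests so metadata writers are not
      # starved by a high volume of readers.
      prioritise_write_locks = 1
  }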
master - man: vgsplit source and destination VGs
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=9c0253d930103c33784...
Commit: 9c0253d930103c33784fac5b303da20104a314b6
Parent: aba9652e584b6f6a422233dea951eb59326a3de2
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Tue Nov 17 11:00:40 2020 -0600
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Tue Nov 17 11:00:40 2020 -0600
man: vgsplit source and destination VGs
make clearer which is source and which is destination
---
man/vgsplit.8_des | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/man/vgsplit.8_des b/man/vgsplit.8_des
index 29eb5c5cb..d42ff1628 100644
--- a/man/vgsplit.8_des
+++ b/man/vgsplit.8_des
@@ -1,6 +1,7 @@
-vgsplit moves one or more PVs from a source VG to a destination VG. The
-PVs can be specified explicitly or implicitly by naming an LV, in which
-case on PVs underlying the LV are moved.
+vgsplit moves one or more PVs from a source VG (the first VG arg) to a
+destination VG (the second VG arg). The PV(s) to move are named after the
+source and destination VGs, or an LV is named, in which case the PVs
+underlying the LV are moved.
If the destination VG does not exist, a new VG is created (command options
can be used to specify properties of the new VG, also see
main - lvchange: fix error for foreign vg activation
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=aba9652e584b6f6a422...
Commit: aba9652e584b6f6a422233dea951eb59326a3de2
Parent: 1cc75317f95def9521af535f4c58fb79df816b8c
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Tue Nov 17 09:22:40 2020 -0600
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Tue Nov 17 09:22:40 2020 -0600
lvchange: fix error for foreign vg activation
was using ECMD_FAILED instead of 0.
---
tools/lvchange.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/lvchange.c b/tools/lvchange.c
index f9a0b54e3..f37b68381 100644
--- a/tools/lvchange.c
+++ b/tools/lvchange.c
@@ -202,7 +202,7 @@ static int _lvchange_activate(struct cmd_context *cmd, struct logical_volume *lv
strcmp(lv->vg->system_id, cmd->system_id) &&
is_change_activating(activate)) {
log_error("Cannot activate LVs in a foreign VG.");
- return ECMD_FAILED;
+ return 0;
}
if (lv_activation_skip(lv, activate, arg_is_set(cmd, ignoreactivationskip_ARG)))
main - tests: integrity mismatch checks for all raid levels
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=1cc75317f95def9521a...
Commit: 1cc75317f95def9521af535f4c58fb79df816b8c
Parent: 5fef89361d45797d2e478419caff4528b5ac6150
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Wed Nov 11 15:13:46 2020 -0600
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Wed Nov 11 15:13:46 2020 -0600
tests: integrity mismatch checks for all raid levels
Verify that corruption is corrected for raid levels other
than raid1. For other raid levels, attempt to corrupt the
given file pattern on each underlying device, since we don't
know which device contains the file being corrupted.
This ensures that corruption is actually introduced
when testing the other raid levels.
Verify that corruption is being corrected by checking
that the integritymismatches count is non-zero for the raid LV,
which includes the total from all images (since we don't
know which image will have the corruption).
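A condensed sketch of the per-device corruption step described above
(it mirrors the _test_fs_with_read_repair helper changed below; the
device arguments and the 0x42 pattern used for fileB are taken from the
test itself):

  for dev in "$@"; do
          xxd "$dev" > dev.txt
          # flip one byte of fileB's 0x42 pattern on this device
          sed -e 's/4242 4242 4242 4242 4242 4242 4242 4242/4242 4242 4242 4242 4242 4242 4242 4243/' dev.txt > dev.bad
          xxd -r dev.bad > "$dev"
          rm -f dev.txt dev.bad
  done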
---
test/shell/integrity-syncaction.sh | 6 ++++
test/shell/integrity.sh | 56 ++++++++++++++++++++++++--------------
2 files changed, 41 insertions(+), 21 deletions(-)
diff --git a/test/shell/integrity-syncaction.sh b/test/shell/integrity-syncaction.sh
index d26855665..a1d96fb0e 100644
--- a/test/shell/integrity-syncaction.sh
+++ b/test/shell/integrity-syncaction.sh
@@ -176,6 +176,8 @@ _wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/$lv1
_test1
+lvs -o integritymismatches $vg/$lv1 |tee mismatch
+not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
@@ -187,6 +189,8 @@ _wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/$lv1
_test2
+lvs -o integritymismatches $vg/$lv1 |tee mismatch
+not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
@@ -199,6 +203,8 @@ _wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/$lv1
_test1
+lvs -o integritymismatches $vg/$lv1 |tee mismatch
+not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
diff --git a/test/shell/integrity.sh b/test/shell/integrity.sh
index e17862595..ffac50902 100644
--- a/test/shell/integrity.sh
+++ b/test/shell/integrity.sh
@@ -61,20 +61,18 @@ _test_fs_with_read_repair() {
umount $mnt
lvchange -an $vg/$lv1
- xxd "$dev1" > dev1.txt
- # corrupt fileB
- sed -e 's/4242 4242 4242 4242 4242 4242 4242 4242/4242 4242 4242 4242 4242 4242 4242 4243/' dev1.txt > dev1.bad
- rm -f dev1.txt
- xxd -r dev1.bad > "$dev1"
- rm -f dev1.bad
+ for dev in "$@"; do
+ xxd "$dev" > dev.txt
+ # corrupt fileB
+ sed -e 's/4242 4242 4242 4242 4242 4242 4242 4242/4242 4242 4242 4242 4242 4242 4242 4243/' dev.txt > dev.bad
+ rm -f dev.txt
+ xxd -r dev.bad > "$dev"
+ rm -f dev.bad
+ done
lvchange -ay $vg/$lv1
- lvs -o integritymismatches $vg/${lv1}_rimage_0 |tee mismatch
- grep 0 mismatch
-
mount "$DM_DEV_DIR/$vg/$lv1" $mnt
-
cmp -b $mnt/fileA fileA
cmp -b $mnt/fileB fileB
cmp -b $mnt/fileC fileC
@@ -174,9 +172,11 @@ lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2"
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/$lv1
-_test_fs_with_read_repair
+_test_fs_with_read_repair "$dev1"
lvs -o integritymismatches $vg/${lv1}_rimage_0 |tee mismatch
not grep 0 mismatch
+lvs -o integritymismatches $vg/$lv1 |tee mismatch
+not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
@@ -188,9 +188,11 @@ _wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/$lv1
-_test_fs_with_read_repair
+_test_fs_with_read_repair "$dev1" "$dev2"
lvs -o integritymismatches $vg/${lv1}_rimage_0 |tee mismatch
not grep 0 mismatch
+lvs -o integritymismatches $vg/$lv1 |tee mismatch
+not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
@@ -202,10 +204,12 @@ _wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/$lv1
-_test_fs_with_read_repair
+_test_fs_with_read_repair "$dev1" "$dev2" "$dev3"
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
lvs -o integritymismatches $vg/${lv1}_rimage_2
+lvs -o integritymismatches $vg/$lv1 |tee mismatch
+not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
@@ -217,10 +221,12 @@ _wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/$lv1
-_test_fs_with_read_repair
+_test_fs_with_read_repair "$dev1" "$dev2" "$dev3"
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
lvs -o integritymismatches $vg/${lv1}_rimage_2
+lvs -o integritymismatches $vg/$lv1 |tee mismatch
+not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
@@ -234,12 +240,14 @@ _wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/${lv1}_rimage_3
_wait_recalc $vg/${lv1}_rimage_4
_wait_recalc $vg/$lv1
-_test_fs_with_read_repair
+_test_fs_with_read_repair "$dev1" "$dev2" "$dev3" "$dev4" "$dev5"
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
lvs -o integritymismatches $vg/${lv1}_rimage_2
lvs -o integritymismatches $vg/${lv1}_rimage_3
lvs -o integritymismatches $vg/${lv1}_rimage_4
+lvs -o integritymismatches $vg/$lv1 |tee mismatch
+not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
@@ -252,11 +260,13 @@ _wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/${lv1}_rimage_3
_wait_recalc $vg/$lv1
-_test_fs_with_read_repair
+_test_fs_with_read_repair "$dev1" "$dev3"
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
lvs -o integritymismatches $vg/${lv1}_rimage_2
lvs -o integritymismatches $vg/${lv1}_rimage_3
+lvs -o integritymismatches $vg/$lv1 |tee mismatch
+not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
@@ -611,32 +621,36 @@ vgremove -ff $vg
# Repeat many of the tests above using bitmap mode
_prepare_vg
-lvcreate --type raid1 -m1 --raidintegrity y --raidintegritymode bitmap -n $lv1 -l 8 $vg
+lvcreate --type raid1 -m1 --raidintegrity y --raidintegritymode bitmap -n $lv1 -l 8 $vg "$dev1" "$dev2"
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/$lv1
-_test_fs_with_read_repair
+_test_fs_with_read_repair "$dev1"
lvs -o integritymismatches $vg/${lv1}_rimage_0 |tee mismatch
not grep 0 mismatch
+lvs -o integritymismatches $vg/$lv1 |tee mismatch
+not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg
_prepare_vg
-lvcreate --type raid6 --raidintegrity y --raidintegritymode bitmap -n $lv1 -l 8 $vg
+lvcreate --type raid6 --raidintegrity y --raidintegritymode bitmap -n $lv1 -l 8 $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5"
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/${lv1}_rimage_3
_wait_recalc $vg/${lv1}_rimage_4
_wait_recalc $vg/$lv1
-_test_fs_with_read_repair
+_test_fs_with_read_repair "$dev1" "$dev2" "$dev3" "$dev4" "$dev5"
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
lvs -o integritymismatches $vg/${lv1}_rimage_2
lvs -o integritymismatches $vg/${lv1}_rimage_3
lvs -o integritymismatches $vg/${lv1}_rimage_4
+lvs -o integritymismatches $vg/$lv1 |tee mismatch
+not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
@@ -644,7 +658,7 @@ vgremove -ff $vg
# remove from active lv
_prepare_vg
-lvcreate --type raid1 -m1 --raidintegrity y --raidintegritymode bitmap -n $lv1 -l 8 $vg
+lvcreate --type raid1 -m1 --raidintegrity y --raidintegritymode bitmap -n $lv1 -l 8 $vg "$dev1" "$dev2"
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_add_new_data_to_mnt
main - integrity: display total mismatches at raid LV level
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=5fef89361d45797d2e4...
Commit: 5fef89361d45797d2e478419caff4528b5ac6150
Parent: 2317ba393459a8848a83b43891188520c6a06559
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Wed Nov 11 15:10:15 2020 -0600
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Wed Nov 11 15:10:15 2020 -0600
integrity: display total mismatches at raid LV level
Each integrity image in a raid LV reports its own number
of integrity mismatches, e.g.
lvs -o integritymismatches vg/lv_rimage_0
lvs -o integritymismatches vg/lv_rimage_1
In addition to this, allow the total number of integrity
mismatches from all images to be displayed for the raid LV.
lvs -o integritymismatches vg/lv
shows the number of mismatches from both lv_rimage_0 and
lv_rimage_1.
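One way to see the raid-LV total next to the per-image counts in a
single report (the VG name is illustrative; -a includes the hidden
_rimage_ sub-LVs):

  lvs -a -o name,integritymismatches vg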
---
lib/metadata/integrity_manip.c | 40 ++++++++++++++++++++++++++++++++++++++++
lib/metadata/metadata-exported.h | 1 +
lib/report/report.c | 4 ++++
3 files changed, 45 insertions(+)
diff --git a/lib/metadata/integrity_manip.c b/lib/metadata/integrity_manip.c
index 00d310e36..53ab1b3fa 100644
--- a/lib/metadata/integrity_manip.c
+++ b/lib/metadata/integrity_manip.c
@@ -895,12 +895,52 @@ int lv_get_raid_integrity_settings(struct logical_volume *lv, struct integrity_s
return 0;
}
+int lv_raid_integrity_total_mismatches(struct cmd_context *cmd,
+ const struct logical_volume *lv,
+ uint64_t *mismatches)
+{
+ struct logical_volume *lv_image;
+ struct lv_segment *seg, *seg_image;
+ uint32_t s;
+ uint64_t mismatches_image;
+ uint64_t total = 0;
+ int errors = 0;
+
+ if (!lv_is_raid(lv))
+ return 0;
+
+ seg = first_seg(lv);
+
+ for (s = 0; s < seg->area_count; s++) {
+ lv_image = seg_lv(seg, s);
+ seg_image = first_seg(lv_image);
+
+ if (!seg_is_integrity(seg_image))
+ continue;
+
+ mismatches_image = 0;
+
+ if (!lv_integrity_mismatches(cmd, lv_image, &mismatches_image))
+ errors++;
+
+ total += mismatches_image;
+ }
+ *mismatches = total;
+
+ if (errors)
+ return 0;
+ return 1;
+}
+
int lv_integrity_mismatches(struct cmd_context *cmd,
const struct logical_volume *lv,
uint64_t *mismatches)
{
struct lv_with_info_and_seg_status status;
+ if (lv_is_raid(lv) && lv_raid_has_integrity((struct logical_volume *)lv))
+ return lv_raid_integrity_total_mismatches(cmd, lv, mismatches);
+
if (!lv_is_integrity(lv))
return_0;
diff --git a/lib/metadata/metadata-exported.h b/lib/metadata/metadata-exported.h
index 37fe9d0ad..54dc29ffe 100644
--- a/lib/metadata/metadata-exported.h
+++ b/lib/metadata/metadata-exported.h
@@ -1433,5 +1433,6 @@ int lv_extend_integrity_in_raid(struct logical_volume *lv, struct dm_list *pvh);
int lv_get_raid_integrity_settings(struct logical_volume *lv, struct integrity_settings **isettings);
int integrity_mode_set(const char *mode, struct integrity_settings *settings);
int lv_integrity_mismatches(struct cmd_context *cmd, const struct logical_volume *lv, uint64_t *mismatches);
+int lv_raid_integrity_total_mismatches(struct cmd_context *cmd, const struct logical_volume *lv, uint64_t *mismatches);
#endif
diff --git a/lib/report/report.c b/lib/report/report.c
index 73a150a7e..2f50a990c 100644
--- a/lib/report/report.c
+++ b/lib/report/report.c
@@ -3338,6 +3338,10 @@ static int _integritymismatches_disp(struct dm_report *rh __attribute__((unused)
if (lv_is_integrity(lv) && lv_integrity_mismatches(lv->vg->cmd, lv, &mismatches))
return dm_report_field_uint64(rh, field, &mismatches);
+ if (lv_is_raid(lv) && lv_raid_has_integrity(lv) &&
+ lv_raid_integrity_total_mismatches(lv->vg->cmd, lv, &mismatches))
+ return dm_report_field_uint64(rh, field, &mismatches);
+
return _field_set_value(field, "", &GET_TYPE_RESERVED_VALUE(num_undef_64));
}
main - tests: update integrity tests
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=2317ba393459a8848a8...
Commit: 2317ba393459a8848a83b43891188520c6a06559
Parent: d7058cfa989762ad33f115528d572cda80918cca
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Tue Nov 10 17:41:04 2020 -0600
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Tue Nov 10 17:41:04 2020 -0600
tests: update integrity tests
Simplified the method of corrupting data; the old method
was not working reliably. Moved syncaction tests to a
different file.
---
test/shell/integrity-syncaction.sh | 206 ++++++++++++++++++++++++++
test/shell/integrity.sh | 286 +++++++++----------------------------
2 files changed, 275 insertions(+), 217 deletions(-)
diff --git a/test/shell/integrity-syncaction.sh b/test/shell/integrity-syncaction.sh
new file mode 100644
index 000000000..d26855665
--- /dev/null
+++ b/test/shell/integrity-syncaction.sh
@@ -0,0 +1,206 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2018 Red Hat, Inc. All rights reserved.
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions
+# of the GNU General Public License v.2.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+SKIP_WITH_LVMPOLLD=1
+
+. lib/inittest
+
+aux have_integrity 1 5 0 || skip
+which mkfs.xfs || skip
+which xfs_growfs || skip
+
+mnt="mnt"
+mkdir -p $mnt
+
+aux prepare_devs 3 40
+
+# Use awk instead of annoyingly long log output from printf
+#printf "%0.sA" {1..16384} >> fileA
+awk 'BEGIN { while (z++ < 16384) printf "A" }' > fileA
+awk 'BEGIN { while (z++ < 16384) printf "B" }' > fileB
+awk 'BEGIN { while (z++ < 16384) printf "C" }' > fileC
+
+_prepare_vg() {
+ # zero devs so we are sure to find the correct file data
+ # on the underlying devs when corrupting it
+ dd if=/dev/zero of="$dev1" bs=1M oflag=direct || true
+ dd if=/dev/zero of="$dev2" bs=1M oflag=direct || true
+ dd if=/dev/zero of="$dev3" bs=1M oflag=direct || true
+ vgcreate $SHARED $vg "$dev1" "$dev2" "$dev3"
+ pvs
+}
+
+_test1() {
+ mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1"
+
+ mount "$DM_DEV_DIR/$vg/$lv1" $mnt
+
+ # we don't want fileA to be located too early in the fs,
+ # otherwise activating the LV will trigger the corruption
+ # to be found and corrected, leaving nothing for syncaction
+ # to find and correct.
+ dd if=/dev/urandom of=$mnt/rand16M bs=1M count=16
+
+ cp fileA $mnt
+ cp fileB $mnt
+ cp fileC $mnt
+
+ umount $mnt
+ lvchange -an $vg/$lv1
+
+ xxd "$dev1" > dev1.txt
+ # corrupt fileB
+ sed -e 's/4242 4242 4242 4242 4242 4242 4242 4242/4242 4242 4242 4242 4242 4242 4242 4243/' dev1.txt > dev1.bad
+ rm -f dev1.txt
+ xxd -r dev1.bad > "$dev1"
+ rm -f dev1.bad
+
+ lvchange -ay $vg/$lv1
+
+ lvs -o integritymismatches $vg/${lv1}_rimage_0 |tee mismatch
+ grep 0 mismatch
+
+ lvchange --syncaction check $vg/$lv1
+
+ _wait_recalc $vg/$lv1
+
+ lvs -o integritymismatches $vg/${lv1}_rimage_0 |tee mismatch
+ not grep 0 mismatch
+
+ mount "$DM_DEV_DIR/$vg/$lv1" $mnt
+ cmp -b $mnt/fileA fileA
+ cmp -b $mnt/fileB fileB
+ cmp -b $mnt/fileC fileC
+ umount $mnt
+}
+
+_test2() {
+ mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1"
+
+ mount "$DM_DEV_DIR/$vg/$lv1" $mnt
+
+ # we don't want fileA to be located too early in the fs,
+ # otherwise activating the LV will trigger the corruption
+ # to be found and corrected, leaving nothing for syncaction
+ # to find and correct.
+ dd if=/dev/urandom of=$mnt/rand16M bs=1M count=16
+
+ cp fileA $mnt
+ cp fileB $mnt
+ cp fileC $mnt
+
+ umount $mnt
+ lvchange -an $vg/$lv1
+
+ # corrupt fileB and fileC on dev1
+ xxd "$dev1" > dev1.txt
+ sed -e 's/4242 4242 4242 4242 4242 4242 4242 4242/4242 4242 4242 4242 4242 4242 4242 4243/' dev1.txt > dev1.bad
+ sed -e 's/4343 4343 4343 4343 4343 4343 4343 4343/4444 4444 4444 4444 4444 4444 4444 4444/' dev1.txt > dev1.bad
+ rm -f dev1.txt
+ xxd -r dev1.bad > "$dev1"
+ rm -f dev1.bad
+
+ # corrupt fileA on dev2
+ xxd "$dev2" > dev2.txt
+ sed -e 's/4141 4141 4141 4141 4141 4141 4141 4141/4141 4141 4141 4141 4141 4141 4145 4141/' dev2.txt > dev2.bad
+ rm -f dev2.txt
+ xxd -r dev2.bad > "$dev2"
+ rm -f dev2.bad
+
+ lvchange -ay $vg/$lv1
+
+ lvs -o integritymismatches $vg/${lv1}_rimage_0 |tee mismatch
+ grep 0 mismatch
+ lvs -o integritymismatches $vg/${lv1}_rimage_1 |tee mismatch
+ grep 0 mismatch
+
+ lvchange --syncaction check $vg/$lv1
+
+ _wait_recalc $vg/$lv1
+
+ lvs -o integritymismatches $vg/${lv1}_rimage_0 |tee mismatch
+ not grep 0 mismatch
+ lvs -o integritymismatches $vg/${lv1}_rimage_1 |tee mismatch
+ not grep 0 mismatch
+
+ mount "$DM_DEV_DIR/$vg/$lv1" $mnt
+ cmp -b $mnt/fileA fileA
+ cmp -b $mnt/fileB fileB
+ cmp -b $mnt/fileC fileC
+ umount $mnt
+}
+
+_sync_percent() {
+ local checklv=$1
+ get lv_field "$checklv" sync_percent | cut -d. -f1
+}
+
+_wait_recalc() {
+ local checklv=$1
+
+ for i in $(seq 1 10) ; do
+ sync=$(_sync_percent "$checklv")
+ echo "sync_percent is $sync"
+
+ if test "$sync" = "100"; then
+ return
+ fi
+
+ sleep 1
+ done
+
+ # TODO: There is some strange bug, first leg of RAID with integrity
+ # enabled never gets in sync. I saw this in BB, but not when executing
+ # the commands manually
+ if test -z "$sync"; then
+ echo "TEST\ WARNING: Resync of dm-integrity device '$checklv' failed"
+ dmsetup status "$DM_DEV_DIR/mapper/${checklv/\//-}"
+ exit
+ fi
+ echo "timeout waiting for recalc"
+ return 1
+}
+
+_prepare_vg
+lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 6 $vg "$dev1" "$dev2"
+_wait_recalc $vg/${lv1}_rimage_0
+_wait_recalc $vg/${lv1}_rimage_1
+_wait_recalc $vg/$lv1
+_test1
+lvchange -an $vg/$lv1
+lvconvert --raidintegrity n $vg/$lv1
+lvremove $vg/$lv1
+vgremove -ff $vg
+
+_prepare_vg
+lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 6 $vg "$dev1" "$dev2"
+_wait_recalc $vg/${lv1}_rimage_0
+_wait_recalc $vg/${lv1}_rimage_1
+_wait_recalc $vg/$lv1
+_test2
+lvchange -an $vg/$lv1
+lvconvert --raidintegrity n $vg/$lv1
+lvremove $vg/$lv1
+vgremove -ff $vg
+
+_prepare_vg
+lvcreate --type raid5 --raidintegrity y -n $lv1 -l 6 $vg "$dev1" "$dev2" "$dev3"
+_wait_recalc $vg/${lv1}_rimage_0
+_wait_recalc $vg/${lv1}_rimage_1
+_wait_recalc $vg/${lv1}_rimage_2
+_wait_recalc $vg/$lv1
+_test1
+lvchange -an $vg/$lv1
+lvconvert --raidintegrity n $vg/$lv1
+lvremove $vg/$lv1
+vgremove -ff $vg
+
diff --git a/test/shell/integrity.sh b/test/shell/integrity.sh
index 7dd237b93..e17862595 100644
--- a/test/shell/integrity.sh
+++ b/test/shell/integrity.sh
@@ -46,109 +46,14 @@ _prepare_vg() {
pvs
}
-_test_fs_with_error() {
- mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1"
-
- mount "$DM_DEV_DIR/$vg/$lv1" $mnt
-
- # add original data
- cp fileA $mnt
- cp fileB $mnt
- cp fileC $mnt
-
- umount $mnt
- lvchange -an $vg/$lv1
-
- # corrupt the original data on the underying dev
- # flip one bit in fileB, changing a 0x42 to 0x43
- # the bit is changed in the last 4096 byte block
- # of the file, so when reading back the file we
- # will get the first three 4096 byte blocks, for
- # a total of 12288 bytes before getting an error
- # on the last 4096 byte block.
- xxd "$dev1" > dev1.txt
- tac dev1.txt > dev1.rev
- rm -f dev1.txt
- sed -e '0,/4242 4242 4242 4242 4242 4242 4242 4242/ s/4242 4242 4242 4242 4242 4242 4242 4242/4242 4242 4242 4242 4242 4242 4242 4243/' dev1.rev > dev1.rev.bad
- rm -f dev1.rev
- tac dev1.rev.bad > dev1.bad
- rm -f dev1.rev.bad
- xxd -r dev1.bad > "$dev1"
- rm -f dev1.bad
-
- lvchange -ay $vg/$lv1
- mount "$DM_DEV_DIR/$vg/$lv1" $mnt
-
- # read complete fileA which was not corrupted
- dd if=$mnt/fileA of=tmp bs=1k
- ls -l tmp
- stat -c %s tmp
- cmp -b fileA tmp
- rm tmp
-
- # read partial fileB which was corrupted
- not dd if=$mnt/fileB of=tmp bs=1k
- ls -l tmp
- stat -c %s tmp | grep 12288
- not cmp -b fileB tmp
- rm tmp
-
- umount $mnt
-}
-
_test_fs_with_read_repair() {
mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1"
mount "$DM_DEV_DIR/$vg/$lv1" $mnt
- # add original data
- cp fileA $mnt
- cp fileB $mnt
- cp fileC $mnt
-
- umount $mnt
- lvchange -an $vg/$lv1
-
- # FIXME: this is only finding/corrupting the bit with raid1
- # other raid levels may require looking at a different dev.
- # (Attempt this xxd/tac/sed/xxd on each dev in the LV?)
-
- xxd "$dev1" > dev1.txt
- tac dev1.txt > dev1.rev
- rm -f dev1.txt
- sed -e '0,/4242 4242 4242 4242 4242 4242 4242 4242/ s/4242 4242 4242 4242 4242 4242 4242 4242/4242 4242 4242 4242 4242 4242 4242 4243/' dev1.rev > dev1.rev.bad
- rm -f dev1.rev
- tac dev1.rev.bad > dev1.bad
- rm -f dev1.rev.bad
- xxd -r dev1.bad > "$dev1"
- rm -f dev1.bad
-
- lvchange -ay $vg/$lv1
- mount "$DM_DEV_DIR/$vg/$lv1" $mnt
-
- # read complete fileA which was not corrupted
- dd if=$mnt/fileA of=tmp bs=1k
- ls -l tmp
- stat -c %s tmp | grep 16384
- cmp -b fileA tmp
- rm tmp
-
- # read complete fileB, corruption is corrected by raid
- dd if=$mnt/fileB of=tmp bs=1k
- ls -l tmp
- stat -c %s tmp | grep 16384
- cmp -b fileB tmp
- rm tmp
-
- umount $mnt
-}
-
-_test_fs_with_syncaction_check() {
- mkfs.xfs -f -s size=4096 "$DM_DEV_DIR/$vg/$lv1"
-
- mount "$DM_DEV_DIR/$vg/$lv1" $mnt
-
- # add original data
+ cp randA $mnt
+ cp randB $mnt
+ cp randC $mnt
cp fileA $mnt
cp fileB $mnt
cp fileC $mnt
@@ -156,40 +61,23 @@ _test_fs_with_syncaction_check() {
umount $mnt
lvchange -an $vg/$lv1
- # FIXME: this is only finding/corrupting the bit with raid1
- # other raid levels may require looking at a different dev.
- # (Attempt this xxd/tac/sed/xxd on each dev in the LV?)
-
xxd "$dev1" > dev1.txt
- tac dev1.txt > dev1.rev
+ # corrupt fileB
+ sed -e 's/4242 4242 4242 4242 4242 4242 4242 4242/4242 4242 4242 4242 4242 4242 4242 4243/' dev1.txt > dev1.bad
rm -f dev1.txt
- sed -e '0,/4242 4242 4242 4242 4242 4242 4242 4242/ s/4242 4242 4242 4242 4242 4242 4242 4242/4242 4242 4242 4242 4242 4242 4242 4243/' dev1.rev > dev1.rev.bad
- rm -f dev1.rev
- tac dev1.rev.bad > dev1.bad
- rm -f dev1.rev.bad
xxd -r dev1.bad > "$dev1"
rm -f dev1.bad
lvchange -ay $vg/$lv1
- lvchange --syncaction check $vg/$lv1
+ lvs -o integritymismatches $vg/${lv1}_rimage_0 |tee mismatch
+ grep 0 mismatch
mount "$DM_DEV_DIR/$vg/$lv1" $mnt
- # read complete fileA which was not corrupted
- dd if=$mnt/fileA of=tmp bs=1k
- ls -l tmp
- stat -c %s tmp | grep 16384
- cmp -b fileA tmp
- rm tmp
-
- # read complete fileB
- dd if=$mnt/fileB of=tmp bs=1k
- ls -l tmp
- stat -c %s tmp | grep 16384
- cmp -b fileB tmp
- rm tmp
-
+ cmp -b $mnt/fileA fileA
+ cmp -b $mnt/fileB fileB
+ cmp -b $mnt/fileC fileC
umount $mnt
}
@@ -282,36 +170,38 @@ _wait_recalc() {
# it is detected by integrity and corrected by raid.
_prepare_vg
-lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg
+lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2"
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
+_wait_recalc $vg/$lv1
_test_fs_with_read_repair
-lvs -o integritymismatches $vg/${lv1}_rimage_0
-lvs -o integritymismatches $vg/${lv1}_rimage_1
+lvs -o integritymismatches $vg/${lv1}_rimage_0 |tee mismatch
+not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg
_prepare_vg
-lvcreate --type raid1 -m2 --raidintegrity y -n $lv1 -l 8 $vg
+lvcreate --type raid1 -m2 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2" "$dev3"
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
+_wait_recalc $vg/$lv1
_test_fs_with_read_repair
-lvs -o integritymismatches $vg/${lv1}_rimage_0
-lvs -o integritymismatches $vg/${lv1}_rimage_1
-lvs -o integritymismatches $vg/${lv1}_rimage_2
+lvs -o integritymismatches $vg/${lv1}_rimage_0 |tee mismatch
+not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg
_prepare_vg
-lvcreate --type raid4 --raidintegrity y -n $lv1 -l 8 $vg
+lvcreate --type raid4 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2" "$dev3"
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
+_wait_recalc $vg/$lv1
_test_fs_with_read_repair
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
@@ -322,10 +212,11 @@ lvremove $vg/$lv1
vgremove -ff $vg
_prepare_vg
-lvcreate --type raid5 --raidintegrity y -n $lv1 -l 8 $vg
+lvcreate --type raid5 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2" "$dev3"
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
+_wait_recalc $vg/$lv1
_test_fs_with_read_repair
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
@@ -336,12 +227,13 @@ lvremove $vg/$lv1
vgremove -ff $vg
_prepare_vg
-lvcreate --type raid6 --raidintegrity y -n $lv1 -l 8 $vg
+lvcreate --type raid6 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5"
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/${lv1}_rimage_3
_wait_recalc $vg/${lv1}_rimage_4
+_wait_recalc $vg/$lv1
_test_fs_with_read_repair
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
@@ -354,11 +246,12 @@ lvremove $vg/$lv1
vgremove -ff $vg
_prepare_vg
-lvcreate --type raid10 --raidintegrity y -n $lv1 -l 8 $vg
+lvcreate --type raid10 --raidintegrity y -n $lv1 -l 8 $vg "$dev1" "$dev2" "$dev3" "$dev4"
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
_wait_recalc $vg/${lv1}_rimage_3
+_wait_recalc $vg/$lv1
_test_fs_with_read_repair
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
@@ -369,94 +262,13 @@ lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
vgremove -ff $vg
-# Test corrupting data on an image and verifying that
-# it is detected and corrected using syncaction check
-
-_prepare_vg
-lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg
-_wait_recalc $vg/${lv1}_rimage_0
-_wait_recalc $vg/${lv1}_rimage_1
-_test_fs_with_syncaction_check
-lvs -o integritymismatches $vg/${lv1}_rimage_0
-lvs -o integritymismatches $vg/${lv1}_rimage_1
-check lv_field $vg/${lv1}_rimage_0 integritymismatches "1"
-check lv_field $vg/${lv1}_rimage_1 integritymismatches "0"
-lvchange -an $vg/$lv1
-lvconvert --raidintegrity n $vg/$lv1
-lvremove $vg/$lv1
-vgremove -ff $vg
-
-_prepare_vg
-lvcreate --type raid4 --raidintegrity y -n $lv1 -l 8 $vg
-_wait_recalc $vg/${lv1}_rimage_0
-_wait_recalc $vg/${lv1}_rimage_1
-_wait_recalc $vg/${lv1}_rimage_2
-_test_fs_with_syncaction_check
-lvs -o integritymismatches $vg/${lv1}_rimage_0
-lvs -o integritymismatches $vg/${lv1}_rimage_1
-lvs -o integritymismatches $vg/${lv1}_rimage_2
-check lv_field $vg/${lv1}_rimage_0 integritymismatches "2"
-check lv_field $vg/${lv1}_rimage_1 integritymismatches "0"
-check lv_field $vg/${lv1}_rimage_2 integritymismatches "0"
-lvchange -an $vg/$lv1
-lvconvert --raidintegrity n $vg/$lv1
-lvremove $vg/$lv1
-vgremove -ff $vg
-
-_prepare_vg
-lvcreate --type raid5 --raidintegrity y -n $lv1 -l 8 $vg
-_wait_recalc $vg/${lv1}_rimage_0
-_wait_recalc $vg/${lv1}_rimage_1
-_wait_recalc $vg/${lv1}_rimage_2
-_test_fs_with_syncaction_check
-lvs -o integritymismatches $vg/${lv1}_rimage_0
-lvs -o integritymismatches $vg/${lv1}_rimage_1
-lvs -o integritymismatches $vg/${lv1}_rimage_2
-lvchange -an $vg/$lv1
-lvconvert --raidintegrity n $vg/$lv1
-lvremove $vg/$lv1
-vgremove -ff $vg
-
-_prepare_vg
-lvcreate --type raid6 --raidintegrity y -n $lv1 -l 8 $vg
-_wait_recalc $vg/${lv1}_rimage_0
-_wait_recalc $vg/${lv1}_rimage_1
-_wait_recalc $vg/${lv1}_rimage_2
-_wait_recalc $vg/${lv1}_rimage_3
-_wait_recalc $vg/${lv1}_rimage_4
-_test_fs_with_syncaction_check
-lvs -o integritymismatches $vg/${lv1}_rimage_0
-lvs -o integritymismatches $vg/${lv1}_rimage_1
-lvs -o integritymismatches $vg/${lv1}_rimage_2
-lvs -o integritymismatches $vg/${lv1}_rimage_3
-lvs -o integritymismatches $vg/${lv1}_rimage_4
-lvchange -an $vg/$lv1
-lvconvert --raidintegrity n $vg/$lv1
-lvremove $vg/$lv1
-vgremove -ff $vg
-
-_prepare_vg
-lvcreate --type raid10 --raidintegrity y -n $lv1 -l 8 $vg
-_wait_recalc $vg/${lv1}_rimage_0
-_wait_recalc $vg/${lv1}_rimage_1
-_wait_recalc $vg/${lv1}_rimage_2
-_wait_recalc $vg/${lv1}_rimage_3
-_test_fs_with_syncaction_check
-lvs -o integritymismatches $vg/${lv1}_rimage_0
-lvs -o integritymismatches $vg/${lv1}_rimage_1
-lvs -o integritymismatches $vg/${lv1}_rimage_2
-lvs -o integritymismatches $vg/${lv1}_rimage_3
-lvchange -an $vg/$lv1
-lvconvert --raidintegrity n $vg/$lv1
-lvremove $vg/$lv1
-vgremove -ff $vg
-
# Test removing integrity from an active LV
_prepare_vg
lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
+_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity n $vg/$lv1
_add_more_data_to_mnt
@@ -471,6 +283,8 @@ _prepare_vg
lvcreate --type raid4 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
+_wait_recalc $vg/${lv1}_rimage_2
+_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity n $vg/$lv1
_add_more_data_to_mnt
@@ -485,6 +299,8 @@ _prepare_vg
lvcreate --type raid5 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
+_wait_recalc $vg/${lv1}_rimage_2
+_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity n $vg/$lv1
_add_more_data_to_mnt
@@ -499,6 +315,10 @@ _prepare_vg
lvcreate --type raid6 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
+_wait_recalc $vg/${lv1}_rimage_2
+_wait_recalc $vg/${lv1}_rimage_3
+_wait_recalc $vg/${lv1}_rimage_4
+_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity n $vg/$lv1
_add_more_data_to_mnt
@@ -513,6 +333,7 @@ _prepare_vg
lvcreate --type raid10 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
+_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity n $vg/$lv1
_add_more_data_to_mnt
@@ -527,6 +348,7 @@ vgremove -ff $vg
_prepare_vg
lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg
+_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity y $vg/$lv1
_wait_recalc $vg/${lv1}_rimage_0
@@ -541,6 +363,7 @@ vgremove -ff $vg
_prepare_vg
lvcreate --type raid4 -n $lv1 -l 8 $vg
+_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity y $vg/$lv1
_wait_recalc $vg/${lv1}_rimage_0
@@ -555,6 +378,7 @@ vgremove -ff $vg
_prepare_vg
lvcreate --type raid5 -n $lv1 -l 8 $vg
+_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity y $vg/$lv1
_wait_recalc $vg/${lv1}_rimage_0
@@ -569,6 +393,12 @@ vgremove -ff $vg
_prepare_vg
lvcreate --type raid6 -n $lv1 -l 8 $vg
+_wait_recalc $vg/${lv1}_rimage_0
+_wait_recalc $vg/${lv1}_rimage_1
+_wait_recalc $vg/${lv1}_rimage_2
+_wait_recalc $vg/${lv1}_rimage_3
+_wait_recalc $vg/${lv1}_rimage_4
+_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity y $vg/$lv1
_wait_recalc $vg/${lv1}_rimage_0
@@ -583,6 +413,7 @@ vgremove -ff $vg
_prepare_vg
lvcreate --type raid10 -n $lv1 -l 8 $vg
+_wait_recalc $vg/$lv1
_add_new_data_to_mnt
lvconvert --raidintegrity y $vg/$lv1
_wait_recalc $vg/${lv1}_rimage_0
@@ -601,6 +432,7 @@ _prepare_vg
lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
+_wait_recalc $vg/$lv1
lvs -a -o+devices $vg
_add_new_data_to_mnt
umount $mnt
@@ -624,6 +456,10 @@ _prepare_vg
lvcreate --type raid6 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
+_wait_recalc $vg/${lv1}_rimage_2
+_wait_recalc $vg/${lv1}_rimage_3
+_wait_recalc $vg/${lv1}_rimage_4
+_wait_recalc $vg/$lv1
lvs -a -o+devices $vg
_add_new_data_to_mnt
umount $mnt
@@ -649,6 +485,7 @@ _prepare_vg
lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
+_wait_recalc $vg/$lv1
lvs -a -o+devices $vg
_add_new_data_to_mnt
lvextend -l 16 $vg/$lv1
@@ -668,6 +505,8 @@ _prepare_vg
lvcreate --type raid5 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
+_wait_recalc $vg/${lv1}_rimage_2
+_wait_recalc $vg/$lv1
lvs -a -o+devices $vg
_add_new_data_to_mnt
lvextend -l 16 $vg/$lv1
@@ -687,6 +526,7 @@ _prepare_vg
lvcreate --type raid10 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
+_wait_recalc $vg/$lv1
lvs -a -o+devices $vg
_add_new_data_to_mnt
lvextend -l 16 $vg/$lv1
@@ -708,6 +548,7 @@ _prepare_vg
lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
+_wait_recalc $vg/$lv1
lvs -a -o+devices $vg
_add_new_data_to_mnt
lvconvert -y -m+1 $vg/$lv1
@@ -730,6 +571,7 @@ lvcreate --type raid1 -m2 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
_wait_recalc $vg/${lv1}_rimage_2
+_wait_recalc $vg/$lv1
lvs -a -o+devices $vg
_add_new_data_to_mnt
lvconvert -y -m-1 $vg/$lv1
@@ -748,6 +590,7 @@ _prepare_vg
lvcreate --type raid1 -m1 --raidintegrity y -n $lv1 -l 8 $vg
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
+_wait_recalc $vg/$lv1
lvs -a -o+devices $vg
_add_new_data_to_mnt
not lvconvert -y -m-1 $vg/$lv1
@@ -769,9 +612,12 @@ vgremove -ff $vg
_prepare_vg
lvcreate --type raid1 -m1 --raidintegrity y --raidintegritymode bitmap -n $lv1 -l 8 $vg
+_wait_recalc $vg/${lv1}_rimage_0
+_wait_recalc $vg/${lv1}_rimage_1
+_wait_recalc $vg/$lv1
_test_fs_with_read_repair
-lvs -o integritymismatches $vg/${lv1}_rimage_0
-lvs -o integritymismatches $vg/${lv1}_rimage_1
+lvs -o integritymismatches $vg/${lv1}_rimage_0 |tee mismatch
+not grep 0 mismatch
lvchange -an $vg/$lv1
lvconvert --raidintegrity n $vg/$lv1
lvremove $vg/$lv1
@@ -779,6 +625,12 @@ vgremove -ff $vg
_prepare_vg
lvcreate --type raid6 --raidintegrity y --raidintegritymode bitmap -n $lv1 -l 8 $vg
+_wait_recalc $vg/${lv1}_rimage_0
+_wait_recalc $vg/${lv1}_rimage_1
+_wait_recalc $vg/${lv1}_rimage_2
+_wait_recalc $vg/${lv1}_rimage_3
+_wait_recalc $vg/${lv1}_rimage_4
+_wait_recalc $vg/$lv1
_test_fs_with_read_repair
lvs -o integritymismatches $vg/${lv1}_rimage_0
lvs -o integritymismatches $vg/${lv1}_rimage_1
main - writecache: supported in dm-writecache version 3
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=d7058cfa989762ad33f...
Commit: d7058cfa989762ad33f115528d572cda80918cca
Parent: 8801a86a3e0c87d92b250a6477f86ef9efdb2ba0
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Mon Nov 9 09:47:01 2020 -0600
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Mon Nov 9 09:47:01 2020 -0600
writecache: supported in dm-writecache version 3
not version 2
---
lib/writecache/writecache.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/writecache/writecache.c b/lib/writecache/writecache.c
index 4ecbf50df..2cef9f26e 100644
--- a/lib/writecache/writecache.c
+++ b/lib/writecache/writecache.c
@@ -255,7 +255,7 @@ static int _target_present(struct cmd_context *cmd,
return 0;
}
- if (min >= 2) {
+ if (min >= 3) {
_writecache_cleaner_supported = 1;
_writecache_max_age_supported = 1;
}
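A quick way to confirm which dm-writecache target version the running
kernel provides (and therefore whether the cleaner and max_age features
are usable) is:

  dmsetup targets | grep writecache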
master - man: update vdo
by Zdenek Kabelac
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=8801a86a3e0c87d92b2...
Commit: 8801a86a3e0c87d92b250a6477f86ef9efdb2ba0
Parent: 63169594385ed4b02d93c2e8f422bbeb0b8225af
Author: Zdenek Kabelac <zkabelac(a)redhat.com>
AuthorDate: Tue Nov 3 16:32:14 2020 +0100
Committer: Zdenek Kabelac <zkabelac(a)redhat.com>
CommitterDate: Tue Nov 3 16:34:46 2020 +0100
man: update vdo
Enhance VDO man page with a chapter describing memory usage
and space requirements.
Remove some unneeded blank lines in man page.
Use more precise terminology.
Correct examples since cpool and vpool are protected names.
---
man/lvmvdo.7_main | 175 +++++++++++++++++++++++++++---------------------------
1 file changed, 88 insertions(+), 87 deletions(-)
diff --git a/man/lvmvdo.7_main b/man/lvmvdo.7_main
index 76a758061..3701e3f58 100644
--- a/man/lvmvdo.7_main
+++ b/man/lvmvdo.7_main
@@ -2,9 +2,7 @@
.SH NAME
lvmvdo \(em LVM Virtual Data Optimizer support
-
.SH DESCRIPTION
-
VDO (which includes kvdo and vdo) is software that provides inline
block-level deduplication, compression, and thin provisioning capabilities
for primary storage.
@@ -13,9 +11,9 @@ Deduplication is a technique for reducing the consumption of storage
resources by eliminating multiple copies of duplicate blocks. Compression
takes the individual unique blocks and shrinks them with coding
algorithms; these reduced blocks are then efficiently packed together into
-physical blocks. Thin provisioning manages the mapping from LBAs presented
-by VDO to where the data has actually been stored, and also eliminates any
-blocks of all zeroes.
+physical blocks. Thin provisioning manages the mapping from logical blocks
+presented by VDO to where the data has actually been physically stored,
+and also eliminates any blocks of all zeroes.
With deduplication, instead of writing the same data more than once each
duplicate block is detected and recorded as a reference to the original
@@ -48,29 +46,23 @@ thin provisioning, block sharing, and compression;
the "\fIuds\fP" module provides memory-efficient duplicate
identification. The userspace tools include \fBvdostats\fP(8)
for extracting statistics from those volumes.
-
-
-.SH VDO Terms
-
+.SH VDO TERMS
.TP
VDODataLV
.br
VDO data LV
.br
-large hidden LV with suffix _vdata created in a VG.
+large hidden LV with suffix _vdata created in a VG
.br
-used by VDO target to store all data and metadata blocks.
-
+used by VDO kernel target to store all data and metadata blocks.
.TP
VDOPoolLV
.br
VDO pool LV
.br
-maintains virtual for LV(s) stored in attached VDO data LV
-and it has same size.
+pool for virtual VDOLV(s) with the size of used VDODataLV
.br
-contains VDOLV(s) (currently supports only a single VDOLV).
-
+a single VDOLV is currently supported.
.TP
VDOLV
.br
@@ -78,14 +70,10 @@ VDO LV
.br
created from VDOPoolLV
.br
-appears blank after creation
-
-.SH VDO Usage
-
+appears blank after creation.
+.SH VDO USAGE
The primary methods for using VDO with lvm2:
-
.SS 1. Create VDOPoolLV with VDOLV
-
Create a VDOPoolLV that will hold VDO data together with
virtual size VDOLV, that user can use. When the virtual size
is not specified, then such LV is created with maximum size that
@@ -106,18 +94,15 @@ operation.
.fi
.I Example
-.br
.nf
# lvcreate --type vdo -n vdo0 -L 10G -V 100G vg/vdopool0
# mkfs.ext4 -E nodiscard /dev/vg/vdo0
.fi
-
.SS 2. Create VDOPoolLV from conversion of an existing LV into VDODataLV
-
Convert an already created/existing LV into a volume that can hold
-VDO data and metadata (a volume reference by VDOPoolLV).
+VDO data and metadata (volume referenced by VDOPoolLV).
User will be prompted to confirm such conversion as it is \fBIRREVERSIBLY
-DESTROYING\fP content of such volume, as it's being immediately
+DESTROYING\fP content of such volume and it is being immediately
formatted by \fBvdoformat\fP(8) as VDO pool data volume. User can
specify virtual size of associated VDOLV with this VDOPoolLV.
When the virtual size is not specified, it will be set to the maximum size
@@ -129,13 +114,10 @@ that can keep 100% uncompressible data there.
.fi
.I Example
-.br
.nf
-# lvconvert --type vdo-pool -n vdo0 -V10G vg/existinglv
+# lvconvert --type vdo-pool -n vdo0 -V10G vg/ExistingLV
.fi
-
.SS 3. Change default settings used for creating VDOPoolLV
-
VDO allows to set large variety of options. Lots of these settings
can be specified by lvm.conf or profile settings. User can prepare
number of different profiles in #DEFAULT_SYS_DIR#/profile directory
@@ -144,7 +126,6 @@ Check output of \fBlvmconfig --type full\fP for detailed description
of all individual vdo settings.
.I Example
-.br
.nf
# cat <<EOF > #DEFAULT_SYS_DIR#/profile/vdo_create.profile
allocation {
@@ -173,10 +154,8 @@ EOF
# lvcreate --vdo -L10G --metadataprofile vdo_create vg/vdopool0
# lvcreate --vdo -L10G --config 'allocation/vdo_cpu_threads=4' vg/vdopool1
.fi
-
.SS 4. Change compression and deduplication of VDOPoolLV
-
-Disable or enable compression and deduplication for VDO pool LV
+Disable or enable compression and deduplication for VDOPoolLV
(the volume that maintains all VDO LV(s) associated with it).
.nf
@@ -184,14 +163,11 @@ Disable or enable compression and deduplication for VDO pool LV
.fi
.I Example
-.br
.nf
-# lvchange --compression n vg/vdpool0
-# lvchange --deduplication y vg/vdpool1
+# lvchange --compression n vg/vdopool0
+# lvchange --deduplication y vg/vdopool1
.fi
-
.SS 5. Checking usage of VDOPoolLV
-
To quickly check how much data of VDOPoolLV are already consumed
use \fBlvs\fP(8). Field Data% will report how much data occupies
content of virtual data for VDOLV and how much space is already
@@ -201,7 +177,6 @@ For a detailed description use \fBvdostats\fP(8) command.
Note: \fBvdostats\fP(8) currently understands only /dev/mapper device names.
.I Example
-.br
.nf
# lvcreate --type vdo -L10G -V20G -n vdo0 vg/vdopool0
# mkfs.ext4 -E nodiscard /dev/vg/vdo0
@@ -219,12 +194,16 @@ Note: \fBvdostats\fP(8) currently understands only /dev/mapper device names.
data blocks used : 79
...
.fi
-
.SS 6. Extending VDOPoolLV size
-
Adding more space to hold VDO data and metadata can be made via
extension of VDODataLV with commands
\fBlvresize\fP(8), \fBlvextend\fP(8).
+Extension needs to add at least one new VDO slab which can be
+configured with \fBallocation/vdo_slab_size_mb\fP setting.
+
+User can also enable automatic size extension of monitored VDOPoolLV
+with \fBactivation/vdo_pool_autoextend_percent\fP and
+\fBactivation/vdo_pool_autoextend_threshold\fP settings.
Note: Size of VDOPoolLV cannot be reduced.
@@ -235,15 +214,12 @@ Note: Size of cached VDOPoolLV cannot be changed.
.fi
.I Example
-.br
.nf
# lvextend -L+50G vg/vdopool0
# lvresize -L300G vg/vdopool1
.fi
-
.SS 7. Extending or reducing VDOLV size
-
-VDO LV can be extended or reduced as standard LV with commands
+Virtual VDO LV can be extended or reduced as standard LV with commands
\fBlvresize\fP(8), \fBlvextend\fP(8), \fBlvreduce\fP(8).
Note: Reduction needs to process TRIM for reduced disk area
@@ -256,79 +232,61 @@ a long time.
.fi
.I Example
-.br
.nf
# lvextend -L+50G vg/vdo0
# lvreduce -L-50G vg/vdo1
# lvresize -L200G vg/vdo2
.fi
-
.SS 8. Component activation of VDODataLV
-
VDODataLV can be activated separately as component LV for examination
purposes. It activates data LV in read-only mode and cannot be modified.
If the VDODataLV is active as component, any upper LV using this volume CANNOT
be activated. User has to deactivate VDODataLV first to continue to use VDOPoolLV.
.I Example
-.br
.nf
# lvchange -ay vg/vpool0_vdata
# lvchange -an vg/vpool0_vdata
.fi
-
-
-.SH VDO Topics
-
+.SH VDO TOPICS
.SS 1. Stacking VDO
-
-User can convert/stack VDO with existing volumes.
-
-.SS 2. VDO on top of raid
-
-Using Raid type LV for VDO Data LV.
+User can convert/stack VDOPoolLV with these currently supported
+volume types: linear, stripe, raid and cache with cachepool
+.SS 2. VDOPoolLV on top of raid
+Using raid type LV for VDODataLV.
.I Example
-.br
.nf
-# lvcreate --type raid1 -L 5G -n vpool vg
-# lvconvert --type vdo-pool -V 10G vg/vpool
+# lvcreate --type raid1 -L 5G -n vdopool vg
+# lvconvert --type vdo-pool -V 10G vg/vdopool
.fi
-
.SS 3. Caching VDODataLV, VDOPoolLV
-
-VDO Pool LV (accepts also VDOPoolLV) caching provides mechanism
+VDODataLV (accepts also VDOPoolLV) caching provides mechanism
to accelerate read and write of already compressed and deduplicated
-blocks together with vdo metadata.
+data blocks together with VDO metadata.
-Cached VDO Data LV cannot be currently resized (also automatic
-resize will not work).
+Cached VDO data LV cannot be currently resized and also the threshold
+based automatic resize will not work.
.I Example
-.br
.nf
-# lvcreate --type vdo -L 5G -V 10G -n vdo1 vg/vpool
-# lvcreate --type cache-pool -L 1G -n cpool vg
-# lvconvert --cache --cachepool vg/cpool vg/vpool
-# lvconvert --uncache vg/vpool
+# lvcreate --type vdo -L 5G -V 10G -n vdo1 vg/vdopool
+# lvcreate --type cache-pool -L 1G -n cachepool vg
+# lvconvert --cache --cachepool vg/cachepool vg/vdopool
+# lvconvert --uncache vg/vdopool
.fi
-
.SS 4. Caching VDOLV
-
VDO LV cache allow users to 'cache' device for better perfomance before
it hits processing of VDO Pool LV layer.
.I Example
-.br
.nf
-# lvcreate -L 5G -V 10G -n vdo1 vg/vpool
-# lvcreate --type cache-pool -L 1G -n cpool vg
-# lvconvert --cache --cachepool vg/cpool vg/vdo1
+# lvcreate -L 5G -V 10G -n vdo1 vg/vdopool
+# lvcreate --type cache-pool -L 1G -n cachepool vg
+# lvconvert --cache --cachepool vg/cachepool vg/vdo1
# lvconvert --uncache vg/vdo1
.fi
-
.SS 5. Usage of Discard/TRIM with VDOLV
-
User can discard data in VDO LV and reduce used blocks in VDOPoolLV.
However present performance of discard operation is still not optimal
and takes considerable amount of time and CPU.
@@ -342,10 +300,53 @@ provisioning in other regions of VDO LV.
For the same reason, user should avoid using mkfs with discard for
freshly created VDO LV to save a lot of time this operation would
take otherwise as device after create empty.
-
-.br
-
-\&
+.SS 6. Memory usage
+VDO target requires 370 MiB of RAM plus an additional 268 MiB
+per each 1 TiB of physical storage managed by the volume.
+
+UDS requires a minimum of 250 MiB of RAM,
+which is also the default amount that deduplication uses.
+
+The memory required for the UDS index is determined by the index type
+and the required size of the deduplication window and
+is controlled by \fBallocation/vdo_use_sparse_index\fP setting.
+
+With enabled UDS sparse indexing it relies on the temporal locality of data
+and attempts to retain only the most relevant index entries in memory and
+can maintain a deduplication window that is ten times larger
+than with dense while using the same amount of memory.
+
+Although the sparse index provides the greatest coverage,
+the dense index provides more deduplication advice.
+For most workloads, given the same amount of memory,
+the difference in deduplication rates between dense
+and sparse indexes is negligible.
+
+Dense index with 1 GiB of RAM maintains 1 TiB deduplication window,
+while sparse index with 1 GiB of RAM maintains 10 TiB deduplication window.
+In general 1 GiB is sufficient for 4 TiB of physical space with
+dense index and 40 TiB with sparse index.
+.SS 7. Storage space requirements
+User can configure a VDOPoolLV to use up to 256 TiB of physical storage.
+Only a certain part of the physical storage is usable to store data.
+This section provides the calculations to determine the usable size
+of a VDO-managed volume.
+
+VDO target requires storage for two types of VDO metadata and for the UDS index:
+.TP
+\(bu
+The first type of VDO metadata uses approximately 1 MiB for each 4 GiB
+of physical storage plus an additional 1 MiB per slab.
+.TP
+\(bu
+The second type of VDO metadata consumes approximately 1.25 MiB
+for each 1 GiB of logical storage, rounded up to the nearest slab.
+.TP
+\(bu
+The amount of storage required for the UDS index depends on the type of index
+and the amount of RAM allocated to the index. For each 1 GiB of RAM,
+a dense UDS index uses 17 GiB of storage and a sparse UDS index will use
+170 GiB of storage.
.SH SEE ALSO
.BR lvm (8),
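As a rough worked example of the figures added above (the sizes are
illustrative): a VDOPoolLV on 10 TiB of physical storage needs about
370 MiB + 10 x 268 MiB, i.e. roughly 3 GiB of RAM for the VDO target
itself, plus at least 250 MiB for UDS; on disk, the first type of VDO
metadata takes about (10 TiB / 4 GiB) x 1 MiB, i.e. roughly 2.5 GiB plus
1 MiB per slab, and a dense UDS index given 1 GiB of RAM adds about
17 GiB of storage while covering roughly a 1 TiB deduplication window.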