[mdadm/f15] mdadm: fix udev incremental assembly rules file

Doug Ledford dledford at fedoraproject.org
Thu Aug 25 19:37:34 UTC 2011


commit fde9b1807d7e7cfcb2697981a297f96c81e20df5
Author: Doug Ledford <dledford at redhat.com>
Date:   Thu Aug 25 15:24:06 2011 -0400

    mdadm: fix udev incremental assembly rules file
    
    Added support for nested md devices, for md on top of dm devices such
    as LUKS-encrypted partitions or LVM logical volumes (although I don't
    recommend that; I recommend encrypting the md device instead), and
    for md devices on top of multipath dm devices.
    
    Signed-off-by: Doug Ledford <dledford at redhat.com>
    (cherry picked from commit 6ffc771fa2630eda55e6c256fffbd01b0c23438d)
    
    Signed-off-by: Doug Ledford <dledford at redhat.com>

 mdadm.rules |   48 ++++++++++++++++++++++++++++++++++++++++++------
 mdadm.spec  |    8 +++++++-
 2 files changed, 49 insertions(+), 7 deletions(-)
---
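For reference, the incremental assembly calls these rules end up making can
also be run by hand when testing; the device names and the ID_PATH value
below are hypothetical placeholders, substitute real member devices from
lsblk:

	# What the "add" rule runs for a plain partition holding an md superblock
	/sbin/mdadm -I /dev/sdb1
	# What the new "change" rule runs for a dm device (an opened LUKS volume
	# or an LVM logical volume) that carries a linux_raid_member signature
	/sbin/mdadm -I /dev/dm-3
	# What the "remove" rule runs when a member goes away; udev normally
	# supplies the kernel name and ID_PATH itself
	/sbin/mdadm -If sdb1 --path pci-0000:00:1f.2-ata-2
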
diff --git a/mdadm.rules b/mdadm.rules
index ddd9e67..46cd926 100644
--- a/mdadm.rules
+++ b/mdadm.rules
@@ -2,16 +2,57 @@
 # automatically cause mdadm to be run.
 # See udev(8) for syntax
 
+# Don't process any events if anaconda is running as anaconda brings up
+# raid devices manually
 ENV{ANACONDA}=="?*", GOTO="md_imsm_inc_end"
+
+# Also don't process disks that are slated to be a multipath device
+ENV{DM_MULTIPATH_DEVICE_PATH}=="?*", GOTO="md_imsm_inc_end"
+
+# We process add events on block devices (since they are ready as soon as
+# they are added to the system), but we must process change events as well
+# on any dm devices (like LUKS partitions or LVM logical volumes) and on
+# md devices because both of these first get added, then get brought live
+# and trigger a change event.  The reason we don't process change events
+# on bare hard disks is that if you stop all arrays on a disk and then
+# run fdisk on the disk to change the partitions, fdisk triggers a change
+# event when it exits, and we want to wait until fdisk is done on all
+# member disks before we do anything.  Unfortunately, we have
+# no way of knowing that, so we just have to let those arrays be brought
+# up manually after fdisk has been run on all of the disks.
+
+# First, process all add events (md and dm devices will not really do
+# anything here, just regular disks, and this also won't get any imsm
+# array members either)
 SUBSYSTEM=="block", ACTION=="add", ENV{ID_FS_TYPE}=="linux_raid_member", \
 	RUN+="/sbin/mdadm -I $env{DEVNAME}"
 SUBSYSTEM=="block", ACTION=="remove", ENV{ID_FS_TYPE}=="linux_raid_member", \
 	RUN+="/sbin/mdadm -If $name --path $env{ID_PATH}"
 
+# Next, check to make sure the BIOS raid stuff wasn't turned off via cmdline
 IMPORT{cmdline}="noiswmd"
 IMPORT{cmdline}="nodmraid"
 ENV{noiswmd}=="?*", GOTO="md_imsm_inc_end"
 ENV{nodmraid}=="?*", GOTO="md_imsm_inc_end"
+SUBSYSTEM=="block", ACTION=="add", ENV{ID_FS_TYPE}=="isw_raid_member", \
+	RUN+="/sbin/mdadm -I $env{DEVNAME}"
+SUBSYSTEM=="block", ACTION=="remove", ENV{ID_FS_TYPE}=="isw_raid_member", \
+	RUN+="/sbin/mdadm -If $name --path $env{ID_PATH}"
+LABEL="md_imsm_inc_end"
+
+# Next make sure that this isn't a dm device we should skip for some reason
+ENV{DM_UDEV_RULES_VSN}!="?*", GOTO="dm_change_end"
+ENV{DM_UDEV_DISABLE_DISK_RULES_FLAG}=="1", GOTO="dm_change_end"
+ENV{DM_SUSPENDED}=="1", GOTO="dm_change_end"
+KERNEL=="dm-*", SUBSYSTEM=="block", ENV{ID_FS_TYPE}=="linux_raid_member", \
+	ACTION=="change", RUN+="/sbin/mdadm -I $env{DEVNAME}"
+LABEL="dm_change_end"
+
+# Finally catch any nested md raid arrays.  If we brought up an md raid
+# array that's part of another md raid array, it won't be ready to be used
+# until the change event that occurs when it becomes live
+KERNEL=="md*", SUBSYSTEM=="block", ENV{ID_FS_TYPE}=="linux_raid_member", \
+	ACTION=="change", RUN+="/sbin/mdadm -I $env{DEVNAME}"
 
 # In case the initramfs only started some of the arrays in our container,
 # run incremental assembly on the container itself.  Note: we ran mdadm
@@ -20,12 +61,7 @@ ENV{nodmraid}=="?*", GOTO="md_imsm_inc_end"
 # file, we will need to add this line into the middle of the next rule:
 #	IMPORT{program}="/sbin/mdadm -D --export $tempnode", \
 
-SUBSYSTEM=="block", ACTION=="add", KERNEL=="md*", \
+SUBSYSTEM=="block", ACTION=="add|change", KERNEL=="md*", \
 	ENV{MD_LEVEL}=="container", RUN+="/sbin/mdadm -I $env{DEVNAME}"
 
-SUBSYSTEM=="block", ACTION=="add", ENV{ID_FS_TYPE}=="isw_raid_member", \
-	RUN+="/sbin/mdadm -I $env{DEVNAME}"
-SUBSYSTEM=="block", ACTION=="remove", ENV{ID_FS_TYPE}=="isw_raid_member", \
-	RUN+="/sbin/mdadm -If $name --path $env{ID_PATH}"
-LABEL="md_imsm_inc_end"
 
diff --git a/mdadm.spec b/mdadm.spec
index 55b995c..e4995ff 100644
--- a/mdadm.spec
+++ b/mdadm.spec
@@ -1,7 +1,7 @@
 Summary:     The mdadm program controls Linux md devices (software RAID arrays)
 Name:        mdadm
 Version:     3.2.2
-Release:     7%{?dist}
+Release:     8%{?dist}
 Source:      http://www.kernel.org/pub/linux/utils/raid/mdadm/mdadm-%{version}.tar.bz2
 Source1:     mdmonitor.init
 Source2:     raid-check
@@ -98,6 +98,12 @@ fi
 %config(noreplace) %{_sysconfdir}/tmpfiles.d/%{name}.conf
 
 %changelog
+* Thu Aug 25 2011 Doug Ledford <dledford at redhat.com> - 3.2.2-8
+- Rework the 65-md-incremental.rules file to add the following support:
+  Nested md raid arrays should now work
+  MD on top of LUKS or other dm-based devices should now work
+  We should no longer grab multipath paths before multipath can claim them
+
 * Wed Jul 27 2011 Doug Ledford <dledford at redhat.com> - 3.2.2-7
 - Fix a bug with readding a device
 - Fix a bug with writemostly flag handling
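
The reworked rules take effect on the next device events; assuming the file
is installed as /lib/udev/rules.d/65-md-incremental.rules (the file name
comes from the changelog, the install path is an assumption), a quick way to
exercise and sanity-check it on a running box is:

	# Reload udev rules, then replay block "add" events so incremental
	# assembly gets a chance to run against devices that already exist
	udevadm control --reload-rules
	udevadm trigger --subsystem-match=block --action=add
	# Confirm the expected arrays were assembled
	cat /proc/mdstat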

