summaryrefslogtreecommitdiff
path: root/debian/patches
diff options
context:
space:
mode:
authormadduck <madduck@3cfab66f-1918-0410-86b3-c06b76f9a464>2006-07-08 22:24:52 +0000
committermadduck <madduck@3cfab66f-1918-0410-86b3-c06b76f9a464>2006-07-08 22:24:52 +0000
commit724cca90e642a6bfbdd212dbf9b838187b27267e (patch)
treeb852ad629769e0c9738f7170eb9c861f4be25bec /debian/patches
parentc1aba7fa922cc2762f4044efb5fb574981a71703 (diff)
Load newtrunk into mdadm/trunk.
Diffstat (limited to 'debian/patches')
-rw-r--r--debian/patches/00list2
-rwxr-xr-xdebian/patches/01-mdadm.conf-location.dpatch117
-rwxr-xr-xdebian/patches/99-md.txt.dpatch368
-rw-r--r--debian/patches/Makefile3
4 files changed, 490 insertions, 0 deletions
diff --git a/debian/patches/00list b/debian/patches/00list
new file mode 100644
index 00000000..5e6ff3b5
--- /dev/null
+++ b/debian/patches/00list
@@ -0,0 +1,2 @@
+01-mdadm.conf-location
+99-md.txt
diff --git a/debian/patches/01-mdadm.conf-location.dpatch b/debian/patches/01-mdadm.conf-location.dpatch
new file mode 100755
index 00000000..095ad5ff
--- /dev/null
+++ b/debian/patches/01-mdadm.conf-location.dpatch
@@ -0,0 +1,117 @@
+#! /bin/sh /usr/share/dpatch/dpatch-run
+## 01-mdadm.conf-location.dpatch by martin f. krafft <madduck@debian.org>
+##
+## All lines beginning with `## DP:' are a description of the patch.
+## DP: Reverses conffile locations to favour /etc/mdadm/mdadm.conf
+
+@DPATCH@
+diff -urNad mdadm-2.5.1~/Makefile mdadm-2.5.1/Makefile
+--- mdadm-2.5.1~/Makefile 2006-06-26 00:06:29.000000000 +0200
++++ mdadm-2.5.1/Makefile 2006-06-26 00:06:45.936514898 +0200
+@@ -48,8 +48,8 @@
+ endif
+
+ SYSCONFDIR = /etc
+-CONFFILE = $(SYSCONFDIR)/mdadm.conf
+-CONFFILE2 = $(SYSCONFDIR)/mdadm/mdadm.conf
++CONFFILE = $(SYSCONFDIR)/mdadm/mdadm.conf
++CONFFILE2 = $(SYSCONFDIR)/mdadm.conf
+ MAILCMD =/usr/sbin/sendmail -t
+ CONFFILEFLAGS = -DCONFFILE=\"$(CONFFILE)\" -DCONFFILE2=\"$(CONFFILE2)\"
+ CFLAGS = $(CWFLAGS) $(CXFLAGS) -DSendmail=\""$(MAILCMD)"\" $(CONFFILEFLAGS)
+diff -urNad mdadm-2.5.1~/ReadMe.c mdadm-2.5.1/ReadMe.c
+--- mdadm-2.5.1~/ReadMe.c 2006-06-25 23:52:13.000000000 +0200
++++ mdadm-2.5.1/ReadMe.c 2006-06-26 00:06:34.108601730 +0200
+@@ -504,7 +504,7 @@
+
+
+ char Help_config[] =
+-"The /etc/mdadm.conf config file:\n\n"
++"The /etc/mdadm/mdadm.conf config file:\n\n"
+ " The config file contains, apart from blank lines and comment lines that\n"
+ " start with a hash(#), four sorts of configuration lines: array lines, \n"
+ " device lines, mailaddr lines and program lines.\n"
+diff -urNad mdadm-2.5.1~/mdadm.8 mdadm-2.5.1/mdadm.8
+--- mdadm-2.5.1~/mdadm.8 2006-06-25 23:52:13.000000000 +0200
++++ mdadm-2.5.1/mdadm.8 2006-06-26 00:06:34.116601671 +0200
+@@ -225,9 +225,9 @@
+ .TP
+ .BR -c ", " --config=
+ Specify the config file. Default is to use
+-.BR /etc/mdadm.conf ,
++.BR /etc/mdadm/mdadm.conf ,
+ or if that is missing, then
+-.BR /etc/mdadm/mdadm.conf .
++.BR /etc/mdadm.conf .
+ If the config file given is
+ .B partitions
+ then nothing will be read, but
+@@ -253,7 +253,7 @@
+ permission to get any missing information, like component devices,
+ array devices, array identities, and alert destination from the
+ configuration file:
+-.BR /etc/mdadm.conf .
++.BR /etc/mdadm/mdadm.conf .
+ One exception is MISC mode when using
+ .B --detail
+ or
+@@ -985,7 +985,7 @@
+ or requested with (a possibly implicit)
+ .B --scan.
+ In the later case,
+-.B /etc/mdadm.conf
++.B /etc/mdadm/mdadm.conf
+ is used.
+
+ If
+@@ -1260,7 +1260,7 @@
+ .B --scan
+ will cause the output to be less detailed and the format to be
+ suitable for inclusion in
+-.BR /etc/mdadm.conf .
++.BR /etc/mdadm/mdadm.conf .
+ The exit status of
+ .I mdadm
+ will normally be 0 unless
+@@ -1296,7 +1296,7 @@
+ then multiple devices that are components of the one array
+ are grouped together and reported in a single entry suitable
+ for inclusion in
+-.BR /etc/mdadm.conf .
++.BR /etc/mdadm/mdadm.conf .
+
+ Having
+ .B --scan
+@@ -1706,7 +1706,7 @@
+ on Monitor mode.
+
+
+-.SS /etc/mdadm.conf
++.SS /etc/mdadm/mdadm.conf
+
+ The config file lists which devices may be scanned to see if
+ they contain MD super block, and gives identifying information
+diff -urNad mdadm-2.5.1~/mdadm.conf.5 mdadm-2.5.1/mdadm.conf.5
+--- mdadm-2.5.1~/mdadm.conf.5 2006-06-25 23:52:13.000000000 +0200
++++ mdadm-2.5.1/mdadm.conf.5 2006-06-26 00:06:34.116601671 +0200
+@@ -2,7 +2,7 @@
+ .SH NAME
+ mdadm.conf \- configuration for management of Software Raid with mdadm
+ .SH SYNOPSIS
+-/etc/mdadm.conf
++/etc/mdadm/mdadm.conf
+ .SH DESCRIPTION
+ .PP
+ .B mdadm
+diff -urNad mdadm-2.5.1~/mdassemble.8 mdadm-2.5.1/mdassemble.8
+--- mdadm-2.5.1~/mdassemble.8 2006-06-25 23:52:13.000000000 +0200
++++ mdadm-2.5.1/mdassemble.8 2006-06-26 00:06:34.116601671 +0200
+@@ -33,7 +33,7 @@
+
+ .SH FILES
+
+-.SS /etc/mdadm.conf
++.SS /etc/mdadm/mdadm.conf
+
+ The config file lists which devices may be scanned to see if
+ they contain MD super block, and gives identifying information
diff --git a/debian/patches/99-md.txt.dpatch b/debian/patches/99-md.txt.dpatch
new file mode 100755
index 00000000..eb56ba83
--- /dev/null
+++ b/debian/patches/99-md.txt.dpatch
@@ -0,0 +1,368 @@
+#! /bin/sh /usr/share/dpatch/dpatch-run
+## 99-md.txt.dpatch by martin f. krafft <madduck@debian.org>
+##
+## All lines beginning with `## DP:' are a description of the patch.
+## DP: No description.
+
+@DPATCH@
+diff -urNad mdadm-2.5.2~/md.txt mdadm-2.5.2/md.txt
+--- mdadm-2.5.2~/md.txt 1970-01-01 01:00:00.000000000 +0100
++++ mdadm-2.5.2/md.txt 2006-07-06 18:28:20.213989423 +0200
+@@ -0,0 +1,357 @@
++Tools that manage md devices can be found at
++ http://www.<country>.kernel.org/pub/linux/utils/raid/....
++
++
++Boot time assembly of RAID arrays
++---------------------------------
++
++You can boot with your md device with the following kernel command
++lines:
++
++for old raid arrays without persistent superblocks:
++ md=<md device no.>,<raid level>,<chunk size factor>,<fault level>,dev0,dev1,...,devn
++
++for raid arrays with persistent superblocks
++ md=<md device no.>,dev0,dev1,...,devn
++or, to assemble a partitionable array:
++ md=d<md device no.>,dev0,dev1,...,devn
++
++md device no. = the number of the md device ...
++ 0 means md0,
++ 1 md1,
++ 2 md2,
++ 3 md3,
++ 4 md4
++
++raid level = -1 linear mode
++ 0 striped mode
++ other modes are only supported with persistent super blocks
++
++chunk size factor = (raid-0 and raid-1 only)
++ Set the chunk size as 4k << n.
++
++fault level = totally ignored
++
++dev0-devn: e.g. /dev/hda1,/dev/hdc1,/dev/sda1,/dev/sdb1
++
++A possible loadlin line (Harald Hoyer <HarryH@Royal.Net>) looks like this:
++
++e:\loadlin\loadlin e:\zimage root=/dev/md0 md=0,0,4,0,/dev/hdb2,/dev/hdc3 ro
++
++
++Boot time autodetection of RAID arrays
++--------------------------------------
++
++When md is compiled into the kernel (not as module), partitions of
++type 0xfd are scanned and automatically assembled into RAID arrays.
++This autodetection may be suppressed with the kernel parameter
++"raid=noautodetect". As of kernel 2.6.9, only drives with a type 0
++superblock can be autodetected and run at boot time.
++
++The kernel parameter "raid=partitionable" (or "raid=part") means
++that all auto-detected arrays are assembled as partitionable.
++
++Boot time assembly of degraded/dirty arrays
++-------------------------------------------
++
++If a raid5 or raid6 array is both dirty and degraded, it could have
++undetectable data corruption. This is because the fact that it is
++'dirty' means that the parity cannot be trusted, and the fact that it
++is degraded means that some datablocks are missing and cannot reliably
++be reconstructed (due to no parity).
++
++For this reason, md will normally refuse to start such an array. This
++requires the sysadmin to take action to explicitly start the array
++despite possible corruption. This is normally done with
++ mdadm --assemble --force ....
++
++This option is not really available if the array has the root
++filesystem on it. In order to support booting from such an
++array, md supports a module parameter "start_dirty_degraded" which,
++when set to 1, bypasses the checks and allows dirty degraded
++arrays to be started.
++
++So, to boot with a root filesystem of a dirty degraded raid[56], use
++
++ md-mod.start_dirty_degraded=1
++
++
++Superblock formats
++------------------
++
++The md driver can support a variety of different superblock formats.
++Currently, it supports superblock formats "0.90.0" and the "md-1" format
++introduced in the 2.5 development series.
++
++The kernel will autodetect which format superblock is being used.
++
++Superblock format '0' is treated differently to others for legacy
++reasons - it is the original superblock format.
++
++
++General Rules - apply for all superblock formats
++------------------------------------------------
++
++An array is 'created' by writing appropriate superblocks to all
++devices.
++
++It is 'assembled' by associating each of these devices with a
++particular md virtual device. Once it is completely assembled, it can
++be accessed.
++
++An array should be created by a user-space tool. This will write
++superblocks to all devices. It will usually mark the array as
++'unclean', or with some devices missing so that the kernel md driver
++can create appropriate redundancy (copying in raid1, parity
++calculation in raid4/5).
++
++When an array is assembled, it is first initialized with the
++SET_ARRAY_INFO ioctl. This contains, in particular, a major and minor
++version number. The major version number selects which superblock
++format is to be used. The minor number might be used to tune handling
++of the format, such as suggesting where on each device to look for the
++superblock.
++
++Then each device is added using the ADD_NEW_DISK ioctl. This
++provides, in particular, a major and minor number identifying the
++device to add.
++
++The array is started with the RUN_ARRAY ioctl.
++
++Once started, new devices can be added. They should have an
++appropriate superblock written to them, and then be passed in with
++ADD_NEW_DISK.
++
++Devices that have failed or are not yet active can be detached from an
++array using HOT_REMOVE_DISK.
++
++
++Specific Rules that apply to format-0 super block arrays, and
++ arrays with no superblock (non-persistent).
++-------------------------------------------------------------
++
++An array can be 'created' by describing the array (level, chunksize
++etc) in a SET_ARRAY_INFO ioctl. This must have major_version==0 and
++raid_disks != 0.
++
++Then uninitialized devices can be added with ADD_NEW_DISK. The
++structure passed to ADD_NEW_DISK must specify the state of the device
++and its role in the array.
++
++Once started with RUN_ARRAY, uninitialized spares can be added with
++HOT_ADD_DISK.
++
++
++
++MD devices in sysfs
++-------------------
++md devices appear in sysfs (/sys) as regular block devices,
++e.g.
++ /sys/block/md0
++
++Each 'md' device will contain a subdirectory called 'md' which
++contains further md-specific information about the device.
++
++All md devices contain:
++ level
++ a text file indicating the 'raid level'. This may be a standard
++ numerical level prefixed by "RAID-" - e.g. "RAID-5", or some
++ other name such as "linear" or "multipath".
++ If no raid level has been set yet (array is still being
++ assembled), this file will be empty.
++
++ raid_disks
++ a text file with a simple number indicating the number of devices
++ in a fully functional array. If this is not yet known, the file
++ will be empty. If an array is being resized (not currently
++ possible) this will contain the larger of the old and new sizes.
++     Some raid levels (RAID1) allow this value to be set while the
++ array is active. This will reconfigure the array. Otherwise
++ it can only be set while assembling an array.
++
++ chunk_size
++     This is the size in bytes for 'chunks' and is only relevant to
++ raid levels that involve striping (1,4,5,6,10). The address space
++ of the array is conceptually divided into chunks and consecutive
++ chunks are striped onto neighbouring devices.
++     The size should be at least PAGE_SIZE (4k) and should be a power
++ of 2. This can only be set while assembling an array
++
++ component_size
++ For arrays with data redundancy (i.e. not raid0, linear, faulty,
++ multipath), all components must be the same size - or at least
++     there must be a size that they all provide space for. This is a key
++     part of the geometry of the array. It is measured in sectors
++ and can be read from here. Writing to this value may resize
++ the array if the personality supports it (raid1, raid5, raid6),
++ and if the component drives are large enough.
++
++ metadata_version
++ This indicates the format that is being used to record metadata
++ about the array. It can be 0.90 (traditional format), 1.0, 1.1,
++ 1.2 (newer format in varying locations) or "none" indicating that
++ the kernel isn't managing metadata at all.
++
++ level
++ The raid 'level' for this array. The name will often (but not
++ always) be the same as the name of the module that implements the
++ level. To be auto-loaded the module must have an alias
++ md-$LEVEL e.g. md-raid5
++ This can be written only while the array is being assembled, not
++ after it is started.
++
++ new_dev
++ This file can be written but not read. The value written should
++ be a block device number as major:minor. e.g. 8:0
++ This will cause that device to be attached to the array, if it is
++ available. It will then appear at md/dev-XXX (depending on the
++ name of the device) and further configuration is then possible.
++
++ sync_speed_min
++ sync_speed_max
++     These are similar to /proc/sys/dev/raid/speed_limit_{min,max}
++ however they only apply to the particular array.
++     If no value has been written to these, or if the word 'system'
++ is written, then the system-wide value is used. If a value,
++ in kibibytes-per-second is written, then it is used.
++ When the files are read, they show the currently active value
++ followed by "(local)" or "(system)" depending on whether it is
++ a locally set or system-wide value.
++
++ sync_completed
++ This shows the number of sectors that have been completed of
++ whatever the current sync_action is, followed by the number of
++ sectors in total that could need to be processed. The two
++ numbers are separated by a '/' thus effectively showing one
++ value, a fraction of the process that is complete.
++
++ sync_speed
++ This shows the current actual speed, in K/sec, of the current
++ sync_action. It is averaged over the last 30 seconds.
++
++
++As component devices are added to an md array, they appear in the 'md'
++directory as new directories named
++ dev-XXX
++where XXX is a name that the kernel knows for the device, e.g. hdb1.
++Each directory contains:
++
++ block
++ a symlink to the block device in /sys/block, e.g.
++ /sys/block/md0/md/dev-hdb1/block -> ../../../../block/hdb/hdb1
++
++ super
++ A file containing an image of the superblock read from, or
++ written to, that device.
++
++ state
++ A file recording the current state of the device in the array
++ which can be a comma separated list of
++ faulty - device has been kicked from active use due to
++ a detected fault
++ in_sync - device is a fully in-sync member of the array
++ spare - device is working, but not a full member.
++ This includes spares that are in the process
++                    of being recovered to
++      This list may grow in future.
++
++ errors
++ An approximate count of read errors that have been detected on
++ this device but have not caused the device to be evicted from
++ the array (either because they were corrected or because they
++ happened while the array was read-only). When using version-1
++ metadata, this value persists across restarts of the array.
++
++ This value can be written while assembling an array thus
++ providing an ongoing count for arrays with metadata managed by
++ userspace.
++
++ slot
++ This gives the role that the device has in the array. It will
++ either be 'none' if the device is not active in the array
++ (i.e. is a spare or has failed) or an integer less than the
++      'raid_disks' number for the array indicating which position
++ it currently fills. This can only be set while assembling an
++ array. A device for which this is set is assumed to be working.
++
++ offset
++ This gives the location in the device (in sectors from the
++ start) where data from the array will be stored. Any part of
++      the device before this offset is not touched, unless it is
++ used for storing metadata (Formats 1.1 and 1.2).
++
++ size
++ The amount of the device, after the offset, that can be used
++ for storage of data. This will normally be the same as the
++ component_size. This can be written while assembling an
++ array. If a value less than the current component_size is
++ written, component_size will be reduced to this value.
++
++
++An active md device will also contain an entry for each active device
++in the array. These are named
++
++ rdNN
++
++where 'NN' is the position in the array, starting from 0.
++So for a 3 drive array there will be rd0, rd1, rd2.
++These are symbolic links to the appropriate 'dev-XXX' entry.
++Thus, for example,
++ cat /sys/block/md*/md/rd*/state
++will show 'in_sync' on every line.
++
++
++
++Active md devices for levels that support data redundancy (1,4,5,6)
++also have
++
++ sync_action
++ a text file that can be used to monitor and control the rebuild
++ process. It contains one word which can be one of:
++ resync - redundancy is being recalculated after unclean
++ shutdown or creation
++ recover - a hot spare is being built to replace a
++ failed/missing device
++ idle - nothing is happening
++ check - A full check of redundancy was requested and is
++                 happening. This reads all blocks and checks
++ them. A repair may also happen for some raid
++ levels.
++ repair - A full check and repair is happening. This is
++ similar to 'resync', but was requested by the
++ user, and the write-intent bitmap is NOT used to
++ optimise the process.
++
++ This file is writable, and each of the strings that could be
++ read are meaningful for writing.
++
++ 'idle' will stop an active resync/recovery etc. There is no
++ guarantee that another resync/recovery may not be automatically
++ started again, though some event will be needed to trigger
++ this.
++ 'resync' or 'recovery' can be used to restart the
++ corresponding operation if it was stopped with 'idle'.
++ 'check' and 'repair' will start the appropriate process
++ providing the current state is 'idle'.
++
++ mismatch_count
++ When performing 'check' and 'repair', and possibly when
++ performing 'resync', md will count the number of errors that are
++ found. The count in 'mismatch_cnt' is the number of sectors
++ that were re-written, or (for 'check') would have been
++ re-written. As most raid levels work in units of pages rather
++      than sectors, this may be larger than the number of actual errors
++ by a factor of the number of sectors in a page.
++
++Each active md device may also have attributes specific to the
++personality module that manages it.
++These are specific to the implementation of the module and could
++change substantially if the implementation changes.
++
++These currently include
++
++ stripe_cache_size (currently raid5 only)
++ number of entries in the stripe cache. This is writable, but
++ there are upper and lower limits (32768, 16). Default is 128.
++  stripe_cache_active (currently raid5 only)
++ number of active entries in the stripe cache
diff --git a/debian/patches/Makefile b/debian/patches/Makefile
new file mode 100644
index 00000000..18a3802b
--- /dev/null
+++ b/debian/patches/Makefile
@@ -0,0 +1,3 @@
+.PHONY: 00list
+00list:
+ find . -name Makefile -prune -o -name 00list -prune -o -type f -print | sed -e 's,\./\(.*\)\.dpatch,\1,' | sort -n > $@