summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--debian/NEWS41
-rw-r--r--debian/README.Debian41
-rw-r--r--debian/README.experimental55
-rw-r--r--debian/README.mdrun43
-rw-r--r--debian/README.recipes128
-rw-r--r--debian/bugscript61
-rw-r--r--debian/changelog589
-rw-r--r--debian/checkarray120
-rw-r--r--debian/compat1
-rw-r--r--debian/control28
-rw-r--r--debian/copyright21
-rw-r--r--debian/docs2
-rw-r--r--debian/initramfs/hook103
-rw-r--r--debian/initramfs/script.local-top93
-rw-r--r--debian/mdadm-raid214
-rw-r--r--debian/mdadm-udeb.dirs2
-rw-r--r--debian/mdadm.config123
-rw-r--r--debian/mdadm.cron.d9
-rw-r--r--debian/mdadm.dirs7
-rw-r--r--debian/mdadm.docs3
-rw-r--r--debian/mdadm.examples2
-rw-r--r--debian/mdadm.init63
-rw-r--r--debian/mdadm.lintian-overrides1
-rw-r--r--debian/mdadm.manpages4
-rw-r--r--debian/mdadm.postinst137
-rw-r--r--debian/mdadm.postrm30
-rw-r--r--debian/mdadm.preinst12
-rw-r--r--debian/mdadm.templates71
-rw-r--r--debian/mdrun189
-rw-r--r--debian/mdrun.865
-rw-r--r--debian/mkconf30
-rw-r--r--debian/patches/00list2
-rwxr-xr-xdebian/patches/01-mdadm.conf-location.dpatch117
-rwxr-xr-xdebian/patches/99-md.txt.dpatch368
-rw-r--r--debian/patches/Makefile3
-rw-r--r--debian/po/POTFILES.in1
-rw-r--r--debian/po/cs.po213
-rw-r--r--debian/po/de.po207
-rw-r--r--debian/po/fr.po205
-rw-r--r--debian/po/ja.po205
-rw-r--r--debian/po/nl.po208
-rw-r--r--debian/po/pt_BR.po232
-rw-r--r--debian/po/ru.po204
-rw-r--r--debian/po/sv.po218
-rw-r--r--debian/po/templates.pot162
-rw-r--r--debian/po/vi.po197
-rw-r--r--debian/rootraiddoc.97.html1333
-rwxr-xr-xdebian/rules103
-rw-r--r--debian/watch2
49 files changed, 6268 insertions, 0 deletions
diff --git a/debian/NEWS b/debian/NEWS
new file mode 100644
index 00000000..b6c27b1f
--- /dev/null
+++ b/debian/NEWS
@@ -0,0 +1,41 @@
+mdadm (2.5-1) unstable; urgency=low
+
+ mdrun has been (finally) obsoleted, and an appropriate warning message is
+ written to the console if you (or a script) attempts to run it. If you
+ cannot live without mdrun, you can disable the warning by setting
+ USE_DEPRECATED_MDRUN=1 in /etc/default/mdadm. Note that mdrun will *not* be
+ supported. Please also see /usr/share/doc/mdadm/README.mdrun .
+
+ -- martin f. krafft <madduck@debian.org> Tue, 30 May 2006 23:25:13 +0200
+
+mdadm (2.4.1-5) unstable; urgency=low
+
+ This version drops the automatic generation of the /etc/mdadm/mdadm.conf
+ file on every boot (if it was missing). This means that you need to ensure
+ that you have a valid configuration file. If none is present during package
+ configuration, mdadm *will* try to generate one, but it will only contain
+ information about arrays that were running at the time of package
+ configuration. Arrays not listed in the configuration file will *not* be
+ started automatically after boot (with the exception of the root partition).
+
+ If you want to recreate your configuration file, either figure out what it
+ should contain from the mdadm.conf(5) manpage, or simply assemble and run
+ all the arrays the way you like it, then run
+ /usr/share/mdadm/mkconf > /etc/mdadm/mdadm.conf
+
+ -- martin f. krafft <madduck@debian.org> Sat, 03 Jun 2006 17:45:47 +0200
+
+mdadm (2.4.1-1) unstable; urgency=low
+
+ As of version 2.3, mdadm uses /etc/mdadm.conf as its main configuration
+ file, and falls back to /etc/mdadm/mdadm.conf if the former is not found.
+ Since Debian uses /etc/mdadm/mdadm.conf as the configuration file path, this
+ order was reverted: Debian's mdadm reads /etc/mdadm/mdadm.conf as its main
+ file and falls back to /etc/mdadm.conf if the former is not found.
+
+ An incompatible change in the reshaping of RAID 5 arrays was made in this
+ upstream release. If you want to reshape a RAID 5 array with a version-1
+ superblock, please make sure to use mdadm 2.4.1 and at least a 2.6.17-rc2
+ kernel.
+
+ -- martin f. krafft <madduck@debian.org> Tue, 16 May 2006 13:07:49 -0500
diff --git a/debian/README.Debian b/debian/README.Debian
new file mode 100644
index 00000000..fb88f483
--- /dev/null
+++ b/debian/README.Debian
@@ -0,0 +1,41 @@
+mdadm for Debian
+================
+
+Please make sure you read into /usr/share/doc/mdadm/NEWS.Debian.gz . Also, the
+documentation for the kernel md driver is included in
+/usr/share/doc/mdadm/md.txt.gz as well as the md(4) manpage. *Do* read those!
+
+The configuration file
+~~~~~~~~~~~~~~~~~~~~~~
+This version of mdadm now needs /etc/mdadm/mdadm.conf, which it will try to
+generate during installation, if nonexistent.
+
+You can use the output of /usr/share/mdadm/mkconf to generate your very own
+mdadm.conf file, and look into
+/usr/share/doc/mdadm/examples/mdadm.conf-example for inspiration.
+
+Autostarting devices
+~~~~~~~~~~~~~~~~~~~~
+The mdadm.conf file controls which devices are to be started automatically by
+mdadm during boot (assuming AUTOSTART is set to true in /etc/default/mdadm),
+and various other parameters about how they are to be started. The file can
+also contain some control parameters for the mdadm monitor daemon. See
+mdadm.conf(5) for more information.
+
+Note: this only applies to modular kernels. If you use a monolithic kernel,
+you can control which devices are started automatically by changing the
+partition type: 0xfd for autostart, 0x83 to prevent autostart. mdadm does not
+actually care about the partition type, only the kernel does.
+
+Common recipes
+~~~~~~~~~~~~~~
+Check /usr/share/doc/mdadm/README.recipes.gz for some simple examples of how
+to do the most common stuff with mdadm.
+
+Upstream
+~~~~~~~~
+For completeness: The upstream repository is available from
+ http://neil.brown.name/cgi-bin/gitweb.cgi?p=mdadm
+ git clone git://neil.brown.name/mdadm
+
+ -- martin f. krafft <madduck@debian.org> Fri, 07 Jul 2006 10:55:42 +0200
diff --git a/debian/README.experimental b/debian/README.experimental
new file mode 100644
index 00000000..362b7eec
--- /dev/null
+++ b/debian/README.experimental
@@ -0,0 +1,55 @@
+mdadm for Debian - EXPERIMENTAL RELEASE
+---------------------------------------
+
+THIS IS AN EXPERIMENTAL RELEASE. DO NOT EVEN THINK ABOUT RUNNING IT ON
+PRODUCTION SERVERS. Or at least don't blame me for it afterwards.
+
+None of the following needs to concern you if you are using monolithic kernels
+(no modules), yaird, or initrd-tools/mkinitrd.
+
+I will leave this version in experimental, until enough success reports have
+reached me at madduck@debian.org (and no failures). Thus, please report your
+experience to me.
+
+This version of mdadm basically addresses #367567 (but also provides a new
+upstream release, of course). The gist is that the hooks and scripts to take
+care of RAID during boot with an initramfs have been improved and moved into
+the mdadm package. mdrun has been deprecated on the way.
+
+As long as initramfs-tools still provides the old hooks and scripts (which use
+mdrun), the new ones will simply not do anything (which allows me to get rid
+of the conflict I needed previously). Once initramfs-tools removes the md
+hooks from the package, the new mdadm hooks will configure the initramfs to
+use mdadm for assembly (and not mdrun).
+
+Unless I've overlooked a detail, no interaction is required from the side of
+the user (apart from the new debconf question) to install and get mdadm
+running and integrated with initramfs-tools. It helps to ensure that
+update-initramfs produces output that squares with your own perception of what
+is needed to boot, and that the output of the script /usr/share/mdadm/mkconf
+is sane after installation and before you reboot.
+
+Also, you may want to provide yourself a safety net by making a copy of the
+initrd:
+
+ cp /boot/initrd.img-$(uname -r) /boot/initrd.img-$(uname -r).before-mdadm25
+
+and then to duplicate your main grub or lilo stanzas (rerun lilo!) and point
+them to the saved initrd. If things go haywire, you should be able to restore
+a working condition with the saved initrd.
+
+If you want to move to the new hooks and scripts immediately (without waiting
+for #367567 to be solved), do this:
+
+ rm /usr/share/initramfs-tools/hooks/md
+ sed -i -e 's,^PREREQ=\"md\"$,PREREQ=\"mdadm\",' \
+ /usr/share/initramfs-tools/scripts/local-top/lvm
+ update-initramfs -u -k$(uname -r)
+
+(that -k is due to #375671. If update-initramfs says something about
+initrd having been altered and refuses to do something, use -t unless you
+modified the initrd on purpose and don't want it overwritten.)
+
+Again, please report success or failure to me.
+
+ -- martin f. krafft <madduck@debian.org> Thu, 29 Jun 2006 01:07:09 +0200
diff --git a/debian/README.mdrun b/debian/README.mdrun
new file mode 100644
index 00000000..71834595
--- /dev/null
+++ b/debian/README.mdrun
@@ -0,0 +1,43 @@
+mdadm for Debian -- notes on mdrun
+==================================
+
+mdadm for Debian ships with /sbin/mdrun, which is a sledgehammer approach to
+assembling RAID volumes during boot. Unfortunately, the script has several
+problems (see e.g. #354705) and can cause major headaches.
+
+Fortunately, mdadm has since added all the functionality needed to replace
+mdrun. Thus, with mdadm 2.5, mdrun has been deprecated in Debian, and will be
+removed in a future release.
+
+mdrun may be used in three cases, and the following list includes what you can
+do to rid yourself of mdrun:
+
+ initramfs:
+ upgrade to the latest version of initramfs (and see README.experimental
+ for now).
+
+ custom scripts / command line use:
+ the command to replace mdrun is:
+
+ mdadm --assemble --scan --auto=yes
+
+ to be able to run this command, you need to ensure that
+ /etc/mdadm/mdadm.conf (or /etc/mdadm.conf, which is less preferred)
+ properly describes your RAID volumes. You can use the output generated by
+ the script /usr/share/mdadm/mkconf to help you migrate an existing or
+ create a new configuration file.
+
+ You can find more information in the mdadm(8) and mdadm.conf(5) manpages.
+
+ custom init.d scripts:
+ while this release includes init.d scripts that do not use mdrun, you may
+ still have custom scripts around. Please consider whether they are still
+ needed. If they do things that the mdadm init.d scripts do not do,
+ consider whether it would be useful to file a wishlist bug.
+
+  Please note that you also need a correct mdadm.conf file, as described
+  above.
+
+Have fun,
+
+ -- martin f. krafft <madduck@debian.org> Tue, 27 Jun 2006 14:19:23 +0200
diff --git a/debian/README.recipes b/debian/README.recipes
new file mode 100644
index 00000000..6dd769a8
--- /dev/null
+++ b/debian/README.recipes
@@ -0,0 +1,128 @@
+mdadm recipes
+=============
+
+The following examples/recipes may help you with your mdadm experience. I'll
+leave it as an exercise to use the correct device names and parameters in each
+case.
+
+Enjoy. Submissions welcome.
+
+0. create a new array
+~~~~~~~~~~~~~~~~~~~~~
+ mdadm --create -l1 -n2 -x1 /dev/md0 /dev/sd[abc]1 # RAID 1, 1 spare
+ mdadm --create -l5 -n3 -x1 /dev/md0 /dev/sd[abcd]1 # RAID 5, 1 spare
+ mdadm --create -l6 -n4 -x1 /dev/md0 /dev/sd[abcde]1 # RAID 6, 1 spare
+
+1. create a degraded array
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+ mdadm --create -l5 -n3 /dev/md0 /dev/sda1 missing /dev/sdb1
+ mdadm --create -l6 -n4 /dev/md0 /dev/sda1 missing /dev/sdb1 missing
+
+2. assemble an existing array
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ mdadm --assemble --auto=yes /dev/md0 /dev/sd[abc]1
+
+ # if the array is degraded, it won't be started. use --run:
+ mdadm --assemble --auto=yes --run /dev/md0 /dev/sd[ab]1
+
+ # or start it by hand:
+ mdadm --run /dev/md0
+
+3. assemble all arrays in /etc/mdadm/mdadm.conf
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ mdadm --assemble --auto=yes --scan
+
+4. assemble a dirty degraded array
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ mdadm --assemble --auto=yes --force /dev/md0 /dev/sd[ab]1
+ mdadm --run /dev/md0
+
+4b. assemble a dirty degraded array at boot-time
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ If the array is started at boot time by the kernel (partition type 0xfd),
+ you can force-assemble it by passing the kernel boot parameter
+
+ md-mod.start_dirty_degraded=1
+
+5. stop arrays
+~~~~~~~~~~~~~~
+ mdadm --stop /dev/md0
+
+ # to stop all arrays in /etc/mdadm/mdadm.conf
+ mdadm --stop --scan
+
+6. hot-add components
+~~~~~~~~~~~~~~~~~~~~~
+ # on the running array:
+ mdadm --add /dev/md0 /dev/sdc1
+ # if you add more components than the array was setup with, additional
+ # components will be spares
+
+7. hot-remove components
+~~~~~~~~~~~~~~~~~~~~~~~~
+ # on the running array:
+ mdadm --fail /dev/md0 /dev/sdb1
+ # if you have configured spares, watch /proc/mdstat how it fills in
+ mdadm --remove /dev/md0 /dev/sdb1
+
+8. hot-grow a RAID1 by adding new components
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ # on the running array, in either order:
+ mdadm --grow -n3 /dev/md0
+ mdadm --add /dev/md0 /dev/sdc1
+ # note: without growing first, additional devices become spares and are
+ # *not* synchronised after the add.
+
+9. hot-shrink a RAID1 by removing components
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ mdadm --fail /dev/md0 /dev/sdc1
+ mdadm --remove /dev/md0 /dev/sdc1
+ mdadm --grow -n2 /dev/md0
+
+10. convert existing filesystem to RAID 1
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ # The idea is to create a degraded RAID 1 on the second partition, move
+ # data, then hot add the first. This seems safer to me than simply to
+ # force-add a superblock to the existing filesystem.
+ #
+ # Assume /dev/sda1 holds the data (and let's assume it's mounted on
+ # /home) and /dev/sdb1 is empty and of the same size...
+ #
+ mdadm --create /dev/md0 -l1 -n2 /dev/sdb1 missing
+ mkfs -t <type> /dev/md0
+ mount /dev/md0 /mnt
+ tar -cf- -C /home . | tar -xf- -C /mnt -p
+ # consider verifying the data
+ umount /home
+ umount /mnt
+ mount /dev/md0 /home # also change /etc/fstab
+ mdadm --add /dev/md0 /dev/sda1
+
+10b. convert existing filesystem to RAID 1 in-place
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ In-place conversion of /dev/sda1 to /dev/md0 is effectively
+ mdadm --create /dev/md0 -l1 -n2 /dev/sda1 missing
+ however, do NOT do this, as you risk filesystem corruption.
+
+ If you need to do this, first unmount and shrink the filesystem by
+ a megabyte (if supported). Then run the above command, then (optionally)
+ again grow the filesystem as much as possible.
+
+ Do make sure you have backups. If you do not yet, consider method (10)
+ instead (and make backups anyway!).
+
+11. convert existing filesystem to RAID 5/6
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ # See (10) for the basics.
+ mdadm --create /dev/md0 -l5 -n3 /dev/sdb1 /dev/sdc1 missing
+ #mdadm --create /dev/md0 -l6 -n4 /dev/sdb1 /dev/sdc1 /dev/sdd1 missing
+ mkfs -t <type> /dev/md0
+ mount /dev/md0 /mnt
+ tar -cf- -C /home . | tar -xf- -C /mnt -p
+ # consider verifying the data
+ umount /home
+ umount /mnt
+ mount /dev/md0 /home # also change /etc/fstab
+ mdadm --add /dev/md0 /dev/sda1
+
+ -- martin f. krafft <madduck@debian.org> Fri, 07 Jul 2006 10:51:29 +0200
diff --git a/debian/bugscript b/debian/bugscript
new file mode 100644
index 00000000..29744b96
--- /dev/null
+++ b/debian/bugscript
@@ -0,0 +1,61 @@
+#!/bin/bash -eu
+#
+# mdadm bug submission control script
+#
+# allows Debian's bug tools to include relevant information in bug reports.
+#
+# Copyright © 2006 martin f. krafft <madduck@debian.org>
+# distributed under the terms of the Artistic Licence.
+#
+# we need /bin/bash for readline capabilities in the prompt(s)
+#
+
+if [ ! -r /proc/mdstat ]; then
+ echo "The local system does not have RAID support (no drivers loaded)."
+ echo "Without RAID support, I cannot collect as much information as I'd like."
+ yesno "Are you sure you want to report a bug at this time? " yep
+
+ [ "$REPLY" = yep ] || exit 0
+fi
+
+echo "--- mount output" >&3
+mount >&3
+echo >&3
+
+echo "--- mdadm.conf" >&3
+if [ -r /etc/mdadm/mdadm.conf ]; then
+ cat /etc/mdadm/mdadm.conf >&3
+else
+ echo no mdadm.conf file. >&3
+fi
+echo >&3
+
+echo "--- /proc/mdstat:" >&3
+cat /proc/mdstat >&3
+echo >&3
+
+echo "--- /proc/partitions:" >&3
+cat /proc/partitions >&3 2>&3 || true
+echo >&3
+
+echo "--- initrd.img-$(uname -r):" >&3
+if [ -r /boot/initrd.img-$(uname -r) ]; then
+ zcat /boot/initrd.img-$(uname -r) 2>&3 | cpio -t 2>&3 | egrep '/md[a/]' >&3
+fi
+echo >&3
+
+if [ -r /proc/modules ]; then
+ echo "--- /proc/modules:" >&3
+ egrep '(dm_|raid|linear|multipath|faulty)' < /proc/modules >&3
+ echo >&3
+fi
+
+if [ -r /var/log/syslog ]; then
+ echo "--- /var/log/syslog:" >&3
+ egrep "^\w{3} [ :[:digit:]]{11} ($(hostname)|localhost) (kernel: md|mdadm): " /var/log/syslog >&3
+ echo >&3
+fi
+
+echo "--- volume detail:" >&3
+for dev in /dev/[hs]d[a-z]*; do mdadm -E $dev 2>/dev/null && echo --; done >&3
+echo >&3
diff --git a/debian/changelog b/debian/changelog
new file mode 100644
index 00000000..eb790bc0
--- /dev/null
+++ b/debian/changelog
@@ -0,0 +1,589 @@
+mdadm (2.5.2-4) experimental; urgency=low
+
+ * The "it takes two to swing" release.
+ * Now does not stop arrays on upgrade or remove. Thanks (and sorry) to
+ Christian Pernegger (and hopefully no others).
+ * Fixed small problem in debconf configuration script related to unbound
+ MAIL_TO variable. Thanks to Christian Pernegger.
+
+ -- martin f. krafft <madduck@debian.org> Fri, 7 Jul 2006 16:59:01 +0200
+
+mdadm (2.5.2-3) experimental; urgency=low
+
+ * The initramfs now gets all RAID modules installed. It's a lot safer to
+ have them all around, the size difference is negligible, and we still only
+ load the needed ones at boot time.
+ * Added /usr/share/mdadm/checkarray, which can be used to check arrays for
+ parity. Also added a debconf question to let the user choose whether
+ cron should run these checks (closes: #377071).
+ * Only shut down arrays automatically when they've been automatically
+ started (closes: #376009).
+ * Make sure the user has a chance to choose the autostart feature by
+ elevating the debconf priority to high (see #376009). The warning about
+ reuse of RAID components has also been elevated to debconf priority high.
+ * The MAIL_TO setting from /etc/default/mdadm has been removed. Instead, use
+ MAILADDR in /etc/mdadm/mdadm.conf. See mdadm.conf(5). Your setting should
+ be automatically migrated.
+ * Now rewrites /etc/default/mdadm (but preserves settings) instead of trying
+ to patch it with changes.
+ * Added note to README.Debian to ensure users know that only the devices
+ listed in mdadm.conf will be autostarted (see #376009).
+ * Now includes latest md.txt from kernel documentation in
+ /usr/share/doc/mdadm/md.txt.gz.
+ * Added some more recipes to /usr/share/doc/mdadm/README.recipes.gz.
+ * Updated debconf translations:
+ - Swedish, thanks to Daniel Nylander!
+ - Brazilian Portuguese, thanks to Felipe Augusto van de Wiel!
+ - Czech, thanks to Miroslav Kure!
+ - Russian, thanks to Yuri Kozlov (closes: #376181).
+ * Further updates to the debconf templates; I hope the translators aren't
+ going to kill me.
+
+ -- martin f. krafft <madduck@debian.org> Fri, 7 Jul 2006 15:09:40 +0200
+
+mdadm (2.5.2-2) experimental; urgency=low
+
+ * The "if it weren't for Munich's wheat beer, there'd be no" release.
+ * Removed -fno-strict-aliasing from compiler options, after upstream fixed
+ the bug that led to its use (see #369779, #356153). Thanks to Elimar
+ Riesebieter for pointing this out (closes: #375876).
+ * Moved detection of RAID devices from initramfs hook to debconf control
+ file, and added a (low-priority) debconf question as to which devices
+ should be started early in the boot sequence. For the cases where we
+ failed to auto-detect previously (e.g. root on LVM on RAID), it's paranoid
+ and suggests to start them all (closes: #375879). Thanks to Alec Berryman
+ for spotting this.
+ * Fixed a typo in README.experimental, which could lead to an unbootable
+ system with initramfs-tools 0.64 or before. Again, thanks to Alec for
+ spotting this.
+ * Extended bug script to include --examine output for all components (at
+ least if called by root, which hopefully should never happen. Err,
+ wait...)
+ * Disabled deprecation warning in mdrun until the transition is complete.
+ * Reworded the debconf templates due to a new question, and also for
+ readability.
+
+ -- martin f. krafft <madduck@debian.org> Thu, 29 Jun 2006 22:54:47 +0200
+
+mdadm (2.5.2-1) experimental; urgency=low
+
+ * New upstream release.
+ * Implemented checks in the initramfs hooks and scripts for compatibility
+ with initramfs-tools. Now we do not need a conflict anymore because
+ mdadm's hooks and scripts will simply do nothing while the ones provided
+ by initramfs-tools are still present.
+ * Not using /bin/bash for mdrun, which I thought we'd need for read timeout
+ support (for the deprecation warning). Since the -n and -t flags to the
+ read shell builtin are non-POSIX, I dropped them, they were merely
+ cosmetic anyway.
+
+ -- martin f. krafft <madduck@debian.org> Tue, 27 Jun 2006 15:06:55 +0200
+
+mdadm (2.5.1-2) experimental; urgency=low
+
+ * Updating dependency on initramfs-tools, which has not yet adopted to mdadm
+ taking over the hooks.
+
+ -- martin f. krafft <madduck@debian.org> Mon, 26 Jun 2006 22:35:08 +0200
+
+mdadm (2.5.1-1) experimental; urgency=low
+
+ * New upstream release:
+ - Really fixes return status of examine (closes: #367901).
+ - Fixes a memory leak in monitor mode (closes: #372618).
+ - Fixes compiler warnings and errors (closes: #373802, #356153, #369779).
+ - Fix byte swapping issues (closes: #369765).
+ - Now lists devices it stops (closes: #369850). This also leads to
+ beautification of the init.d script's stop action.
+ * Fixed RAID init script to not complain about missing logger command.
+
+ -- martin f. krafft <madduck@debian.org> Mon, 26 Jun 2006 00:58:36 +0200
+
+mdadm (2.5-4) experimental; urgency=low
+
+ * The "would you like fries with your parasite?" release.
+ * Now does not require RAID support from the kernel just for package
+ installation; that was silly of me, sorry (closes: Bug#370115).
+ * Added version to Replaces: initramfs-tools dependency.
+ * Further init.d script improvements.
+ * Recommends mail-transport-agent, or the monitor daemon won't be able to
+ send anything.
+ * Ignores failures from modprobe in postinst when RAID modules are not
+ available (closes: #370582).
+
+ -- martin f. krafft <madduck@debian.org> Tue, 6 Jun 2006 12:45:53 +0200
+
+mdadm (2.5-3) experimental; urgency=low
+
+ * Added /usr/share/doc/mdadm/README.recipes with some common usage examples.
+ * Vastly improved the mdadm-raid init.d script output, and removed bashisms.
+
+ -- martin f. krafft <madduck@debian.org> Fri, 2 Jun 2006 00:45:06 +0200
+
+mdadm (2.5-2) experimental; urgency=low
+
+ * The "on her majesty's secret service" release.
+ * Enabled -DDEBIAN during build, which will take care of default permissions
+ on devices created by mdadm. Together with the CREATE configuration
+ directive in 2.5, this now certainly closes: Bug#310241.
+ * Added a patch (incorporating lib/mm/xlate.h from lvm2) to prevent direct
+ access to kernel headers from userspace (closes: Bug#369765).
+ * Disabled strict aliasing compiler checks until we find a better
+ implementation for linked lists in C (closes: Bug#369779, Bug#356153).
+ * Actually decreased the size of the udeb mdadm binary with -Os
+ -fomit-frame-pointer (as suggested by Joey Hess) (closes: Bug#314370)
+ * Added Replaces: initramfs-tools to communicate that we're not conflicting
+ but replacing instead (see Bug#367567)
+ * Updated conflict with initramfs-tools to (<< 0.63) per suggestion by the
+ maintainers.
+
+ -- martin f. krafft <madduck@debian.org> Thu, 1 Jun 2006 20:15:17 +0200
+
+mdadm (2.5-1) experimental; urgency=low
+
+ * The "show me the way to the next whiskey bar" release.
+ * See /usr/share/doc/mdadm/README.experimental or
+ http://madduck.net/~madduck/scratch/README.experimental
+ * New upstream release:
+ - mails include /proc/mdstat output (closes: Bug#355882)
+ - allows specification of device permissions in config (closes: Bug#310241)
+ * /sbin/mdrun has been deprecated and replaced by calls to /sbin/mdadm;
+ a proper deprecation warning is in place (see NEWS).
+ * Moved initramfs hook and script into the package, and switched it to mdadm
+    (from mdrun). Thanks to Stephen Frost for his help (closes: Bug#354144).
+ This should make sure that the right minor numbers are chosen during boot
+ (mainly because mdadm takes care of it all) (closes: Bug#361408).
+ * Removing mdrun from the udeb (d-i patch submitted to debian-boot mailing
+ list)
+ * Upstream links against openssl for SHA1 support (homehost feature), which
+ is a problem. An internal SHA1 implementation is provided, however, so
+ I just link against that.
+ * Switched init.d scripts to use LSB-compliant output.
+ * Enhanced init.d script output.
+
+ -- martin f. krafft <madduck@debian.org> Thu, 1 Jun 2006 02:20:22 +0200
+
+mdadm (2.4.1-2) unstable; urgency=low
+
+ * The "this took way longer than I thought" release.
+ * Migrating to unstable.
+ * If the init.d script creates the mdadm.conf file, it should remove it on
+ purge. To accomplish this, I create a semaphore in /var/lib/mdadm if it
+ was generated, and only remove the conffile on purge if the semaphore
+ exists.
+ * Added a little helper /usr/share/mdadm/mkconf to aid generation of
+ configuration file.
+ * Added a bug script to collect some important information when the user
+ uses Debian bug reporting tools (such as reportbug).
+ * Added a debian/watch file.
+
+ -- martin f. krafft <madduck@debian.org> Wed, 31 May 2006 23:07:48 +0200
+
+mdadm (2.4.1-1) experimental; urgency=low
+
+ * The "I'll kill that maintainer... uh, wait, it's me" release. Sorry for
+ the delay, here's the long awaited new upstream release (closes:
+ Bug#337903, Bug#363592), which gets rid of a bunch of functionality bugs:
+ - reiserfs size does not overflow anymore (closes: Bug#318230)
+ - fixed typos in manpages (closes: Bug#352798)
+ Oh, and we're moving away from that arch nightmare too. Sorry for the
+ confusion.
+ * Experimental release, because I really don't want to be responsible for
+ data loss. Though I am quite sure that the upgrade is painless, I also
+ don't have access to 18 drive RAID 10 with multipath on s390 or similar
+ arrangements.
+ * We now make the /dev/md* devices in postinst unless /dev/md15 exists (no
+ longer checking for /dev/md0), or unless devfs is in use. If udev is used,
+ /dev/md15 will only exist in complex setups, so the devices will be made
+ in /dev/.static by MAKEDEV, which is not really a concern. I opted against
+ unconditionally calling MAKEDEV until #367407 is fixed so as to preserve
+ custom permissions or owner settings. This also acknowledges the NMU
+ (#299623).
+ closes: Bug#310247, Bug#299623
+ * Patched some of the code to make mdadm honour /etc/mdadm/mdadm.conf over
+ /etc/mdadm.conf (see NEWS).
+ * Fixed a couple of typos in the mdadm(8) manpage; thanks to Reuben Thomas.
+ closes: Bug#345669, Bug#345667
+ * Pushed Standards-Version to 3.7.2; no changes required.
+ * Updated Debconf translations:
+ - Vietnamese by Clytie Siddall (closes: Bug#323950)
+ - Czech by Miroslav Kure (closes: Bug#360290)
+ - Russian by Yuri Kozlov (closes: Bug#361116)
+ - French by Eric Madesclair (closes: Bug#323988)
+ * Added new Debconf translations:
+ - Swedish by Daniel Nylander (closes: Bug#333486)
+ - Dutch by Frans Pop (closes: Bug#344714)
+
+ -- martin f. krafft <madduck@debian.org> Tue, 16 May 2006 18:21:36 -0500
+
+mdadm (1.12.0-1) unstable; urgency=low
+
+ * New upstream release.
+ (obsoletes branches: symlinks)
+ (reduces branches: gcc4signedness, debian, autoscan)
+ * Fixed typo in mdadm.conf(5) manpage (closes: Bug#321152).
+
+ -- martin f. krafft <madduck@debian.org> Sun, 24 Jul 2005 19:20:01 +0200
+
+mdadm (1.9.0-5) unstable; urgency=low
+
+ * martin f. krafft: (the, "look ma', we're maintained in arch now!" release)
+ (no functional differences except for added/updated translations)
+ - Acknowledge NMU by Steve Langasek; thanks! (closes: Bug#299623)
+ - split diff.gz into different arch branches (see debian/arch-branches).
+ - debian/control:
+ - Changed maintainer to pkg-mdadm-devel.
+ - Reworded some of the descriptions (closes: Bug#304170).
+ - Pushed Standards-Version to 3.6.2.1; no changes needed.
+ - fixed po-debconf integration
+ - debian/rules:
+ - fixed po-debconf integration
+ - l10n changes:
+ - Removed amiguity from debconf template (closes: Bug#312754).
+ - Added Vietnamese debconf translation; thanks to Clytie Siddall!
+ (closes: Bug#312753)
+ - Added Czech debconf translation; thanks to Miroslav Kure! (closes: Bug#319626)
+ - Updated German debconf translation; thanks to Jens Seidel! (closes: Bug#313981)
+ - backported upstream's gcc4 signedness fixes from 1.12.0 (gcc4signedness
+ branch) (closes: Bug#319743).
+
+ -- martin f. krafft <madduck@debian.org> Sun, 24 Jul 2005 17:58:46 +0200
+
+mdadm (1.9.0-4.1) unstable; urgency=high
+
+ * Non-maintainer upload.
+ * High-urgency upload for sarge-targetted RC bugfix
+ * Make sure error output from MAKEDEV is sent to stderr, to avoid
+ interfering with debconf; this avoids installation problems on
+ udev-using systems. Thanks to Jonas Smedegaard for the patch.
+ Closes: #299623.
+
+ -- Steve Langasek <vorlon@debian.org> Wed, 1 Jun 2005 03:36:42 -0700
+
+mdadm (1.9.0-4) unstable; urgency=high
+
+ * High-urgency upload for sarge targeted RC bugfix.
+ * mdrun: replaced invocation of /usr/bin/seq with hard-coded sequence
+ (closes: Bug#310671).
+
+ -- martin f. krafft <madduck@debian.org> Wed, 25 May 2005 09:51:41 +0200
+
+mdadm (1.9.0-3) unstable; urgency=high
+
+ * High-urgency upload for sarge targeted RC bugfix.
+ * Applied patch by Peter Samuelson <peter@p12n.org>, which causes mdadm to
+ follow symlinks of device nodes (closes: #274859, #310412, #310492).
+ * Added myself as co-maintainer as per agreement with Mario Joussen.
+
+ -- martin f. krafft <madduck@debian.org> Tue, 24 May 2005 00:03:49 +0200
+
+mdadm (1.9.0-2.3) unstable; urgency=high
+
+ * Non-maintainer upload.
+ * Do not prevent postinst node creation when udev is being used; MAKEDEV
+ puts files into /dev/.static/dev with udev, which is needed so that device
+ nodes will be there even if udev is removed. Sorry for letting this slip
+ my mind and thanks to Steve Langasek for spotting this error.
+ * Leaving urgency at high to make sarge.
+
+ -- martin f. krafft <madduck@debian.org> Sun, 22 May 2005 19:35:04 +0200
+
+mdadm (1.9.0-2.2) unstable; urgency=high
+
+ * Non-maintainer upload.
+ * High-urgency upload for sarge targeted RC bugfix.
+ * Move mdadm-raid back to S25 as it needs to run after modules have been
+ loaded at S20 (see followups to #294404, #301560).
+ * Verified that Steve Langasek's patch to config.c (see item 4 of the
+ 1.9.0-2.1 changelog) is necessary for `mdadm -A -s` to work.
+ (closes: #301560)
+ * Integrated patch by Erik van Konijnenburg to fix mdadm's --auto
+ option in the presence of --scan.
+ (closes: #294404, #273182, #284028, #310126).
+ * Modified mdrun to call mdadm with --auto in assembly mode. Removed code
+ which would auto-create 24 device nodes during system startup when udev
+ was used.
+ * Fixed next_free_md function in mdrun to iterate all 24 nodes instead of
+ using some fragile shell globbing, which did not work anyway.
+ * Prevent postinst node creation when udev is being used.
+ * Added a README.udev file to /usr/share/doc/mdadm.
+
+ -- martin f. krafft <madduck@debian.org> Sun, 22 May 2005 12:57:56 +0200
+
+mdadm (1.9.0-2.1) unstable; urgency=high
+
+ * Non-maintainer upload.
+ * High-urgency upload for sarge targetted RC bugfix.
+ * Start mdadm-raid before udev on boot-up, so that mdadm device node
+ creation is honored, and support changing the init script ordering
+ on upgrades (closes: #294404).
+ * Fix mdadm --scan to prefer the values contained in /proc/partitions,
+ instead of picking up device node names at random from /dev.
+ * Teach mdrun to look at /dev/.static/dev instead of /.dev for udev
+ mounts requiring autostart (closes: #301560).
+
+ -- Steve Langasek <vorlon@debian.org> Sun, 27 Mar 2005 21:59:12 -0800
+
+mdadm (1.9.0-2) unstable; urgency=low
+
+ * Patched is_standard() to accept /dev/md/* names as standard.
+ Thanks to Colin Watson <cjwatson@debian.org>.
+ (closes: Bug#296794)
+ * Added another typecast to make it compilable on amd64 with gcc-4.0.
+ Thanks to Andreas Jochens <aj@andaco.de>.
+ (closes: Bug#294217)
+ * Removed unnecessary second assignment to $BASE in mdrun.
+ Thanks to Colin Watson <cjwatson@debian.org>.
+ (closes: Bug#295433)
+
+ -- Mario Joussen <joussen@debian.org> Sun, 6 Mar 2005 14:22:24 +0100
+
+mdadm (1.9.0-1) unstable; urgency=high
+
+ * New upstream release.
+ Solves problems with same UUID for each array.
+ Again a stable upstream version.
+ (closes: Bug#292282, Bug#293406, Bug#292784, Bug#290363, Bug#292715)
+ * Added some typecasts to make it compilable on amd64 with gcc-4.0.
+ Thanks to Andreas Jochens <aj@andaco.de>.
+ (closes: Bug#287638)
+
+ -- Mario Joussen <joussen@debian.org> Sun, 6 Feb 2005 12:25:03 +0100
+
+mdadm (1.8.1-1) unstable; urgency=low
+
+ * New upstream release.
+ Fixed segfault if no config file present and --scan is used.
+ (closes: Bug#283425, Bug#282604, Bug#284024)
+ * Fixed typo in detailed help of grow mode.
+ (closes: Bug#286980)
+ * Added japanese debconf translation. Thanks to Hideki Yamane
+ <henrich@samba.gr.jp>.
+ (closes: Bug#281073)
+ * Fixed missing variable initialization causing segfaults.
+
+ -- Mario Joussen <joussen@debian.org> Sun, 26 Dec 2004 14:44:31 +0100
+
+mdadm (1.7.0-2) unstable; urgency=high
+
+ * Changed debconf script to save the settings from the config file.
+ Thanks to Fabio Massimo Di Nitto <fabbione@fabbione.net> and
+ Frank Lichtenheld <djpig@debian.org> for the patch.
+ (closes: Bug#274208)
+ * Moved try to load md module inside the AUTOSTART if branch in
+ /etc/init.d/mdadm.
+ * Removed try to load md module from /etc/init.d/mdadm-raid.
+
+ -- Mario Joussen <joussen@debian.org> Sun, 24 Oct 2004 19:48:06 +0200
+
+mdadm (1.7.0-1) unstable; urgency=low
+
+ * New upstream release.
+ (closes: Bug#267814)
+
+ -- Mario Joussen <joussen@debian.org> Sun, 12 Sep 2004 20:48:33 +0200
+
+mdadm (1.6.0-3) unstable; urgency=high
+
+ * Added 'Conflicts: raidtools2 (<< 1.00.3-12.1)' because these packages
+ contain a mdrun.8 man page also.
+ (closes: Bug#268634, Bug#266527)
+ * Updated the french translation.
+ Thanks to Eric <eric-m@wanadoo.fr>
+ (closes: Bug#266251)
+
+ -- Mario Joussen <joussen@debian.org> Sat, 28 Aug 2004 18:23:17 +0200
+
+mdadm (1.6.0-2) unstable; urgency=low
+
+ * Included version 0.97 of "Debian Software Root Raid Documentation".
+ * Now mdrun is only used if no mdadm.conf is present.
+ Thanks to Thomas Prokosch <7nrmi1s02@sneakemail.com>.
+ (closes: Bug#264059)
+ * Added man page for mdrun.
+ Thanks to Robert Collins <robertc@robertcollins.net>.
+ (closes: Bug#265480)
+ * Moved /etc/mdadm/debian.conf to /etc/default/mdadm.
+ (closes: Bug#254922)
+ * Added a little workaround to mdrun to interact better with udev.
+ Thanks to Fabio Massimo Di Nitto <fabbione@fabbione.net>.
+ (closes: Bug#259491)
+ * Updated Brazilian Portuguese translation.
+ Thanks to Andre Luis Lopes <andrelop@debian.org>.
+ (closes: Bug#264220)
+
+ -- Mario Joussen <joussen@debian.org> Mon, 16 Aug 2004 22:10:59 +0200
+
+mdadm (1.6.0-1) unstable; urgency=low
+
+ * New upstream release.
+ Detect degraded arrays in --monitor mode now.
+ (closes: Bug#257357)
+ * Changed default to autostart RAID array.
+ (closes: Bug#250792)
+ * Fixed mdrun problem with kernel 2.6.
+ Thanks to Andre Tomt <andre@tomt.net> and Fabio Massimo Di Nitto
+ <fabbione@fabbione.net>
+ (closes: Bug#231823)
+ * Changed reuse warning to be less misleading.
+ (closes: Bug#253339)
+
+ -- Mario Joussen <joussen@debian.org> Tue, 20 Jul 2004 21:40:33 +0200
+
+mdadm (1.5.0-2) unstable; urgency=low
+
+ * Added french debconf template.
+ Thanks to Eric Madesclair <eric-m@wanadoo.fr>.
+ (closes: Bug#231968)
+
+ -- Mario Joussen <joussen@debian.org> Tue, 4 May 2004 21:29:19 +0200
+
+mdadm (1.5.0-1) unstable; urgency=low
+
+ * New upstream release.
+ * Rewrote debconf templates to avoid referring to debconf interface
+ widgets.
+ (closes: Bug#231221)
+ * Removed manual scan for RAID devices from init script.
+ (closes: Bug#233122, Bug#236762)
+ * Added creation of an udeb package.
+ (closes: Bug#243609)
+ * Added "Debian Software Root Raid Documentation".
+ Thanks to Lucas Albers <albersl@cs.montana.edu>.
+
+ -- Mario Joussen <joussen@debian.org> Sun, 25 Apr 2004 16:16:06 +0200
+
+mdadm (1.4.0-3) unstable; urgency=low
+
+ * Updated to standards version 3.6.1.0 and debhelper 4.
+ * Corrected definition of BLKGETSIZE64 macro to compile with the
+ 2.6 kernel headers.
+ (closes: Bug#223191)
+ * Switched to po-debconf to provide localized debconf templates.
+ (closes: Bug#225288)
+
+ -- Mario Joussen <joussen@debian.org> Sun, 1 Feb 2004 19:30:53 +0100
+
+mdadm (1.4.0-2) unstable; urgency=low
+
+ * Corrected human readable size calculation.
+ (closes: Bug#225041)
+ * Added a warning about reusing hard disks and using the autostart
+ feature.
+ (closes: Bug#223790)
+
+ -- Mario Joussen <joussen@debian.org> Thu, 25 Dec 2003 19:52:57 +0100
+
+mdadm (1.4.0-1) unstable; urgency=low
+
+ * New upstream release.
+
+ -- Mario Joussen <joussen@debian.org> Sun, 7 Dec 2003 19:39:27 +0100
+
+mdadm (1.3.0-2) unstable; urgency=low
+
+ * Added upstream changelog to package.
+
+ -- Mario Joussen <joussen@debian.org> Tue, 12 Aug 2003 21:51:59 +0200
+
+mdadm (1.3.0-1) unstable; urgency=low
+
+ * New upstream release.
+ (closes: Bug#191561, Bug#200921)
+
+ -- Mario Joussen <joussen@debian.org> Thu, 31 Jul 2003 20:59:20 +0200
+
+mdadm (1.2.0-1) unstable; urgency=low
+
+ * New upstream release. (closes: Bug#183191)
+ * New version of mdrun that works properly with devfs and temporary
+ device directory.
+ (closes: Bug#182035)
+ * Added 'Conflicts: raidtools' because of a name clash with mdrun.
+ (closes: Bug#182960)
+
+ -- Mario Joussen <joussen@debian.org> Sun, 16 Mar 2003 13:32:45 +0100
+
+mdadm (1.0.1-4) unstable; urgency=low
+
+ * Changed mdrun so that it can deal with partition statistics in
+ /proc/partitions.
+ (closes: Bug#174000, Bug#175130)
+ * Added russian (ru) debconf template translation. Thanks to Sergey
+ Spiridonov <sena@hurd.homeunix.org>.
+
+ -- Mario Joussen <joussen@debian.org> Sun, 5 Jan 2003 13:14:45 +0100
+
+mdadm (1.0.1-3) unstable; urgency=low
+
+ * Fixed a bug in mdrun. (closes: Bug#167607)
+
+ -- Mario Joussen <joussen@debian.org> Mon, 11 Nov 2002 07:53:23 +0100
+
+mdadm (1.0.1-2) unstable; urgency=low
+
+ * Fixed typo in help option. (closes: Bug#151533)
+ * Added a script that discovers and assembles all arrays automatically.
+ Thanks to Eduard Bloch <blade@debian.org>.
+ (closes: Bug#161699)
+
+ -- Mario Joussen <joussen@debian.org> Fri, 1 Nov 2002 13:46:47 +0100
+
+mdadm (1.0.1-1) unstable; urgency=low
+
+ * New upstream release.
+
+ -- Mario Joussen <joussen@debian.org> Thu, 30 May 2002 14:01:22 +0200
+
+mdadm (0.8.2-1) unstable; urgency=low
+
+ * New upstream release.
+ * Splitted up mdadm.templates in one file for each language.
+ * Added brazilian portuguese (pt_BR) debconf template translation.
+ Thanks to Andre Luis Lopes <andrelop@ig.com.br>.
+ (closes: Bug#141540)
+
+ -- Mario Joussen <joussen@debian.org> Thu, 18 Apr 2002 19:31:00 +0200
+
+mdadm (0.7.2-1) unstable; urgency=low
+
+ * New upstream release.
+ * 'mdctl' was renamed to 'mdadm' upstream.
+ * Removed question about updating mdctl init script links.
+
+ -- Mario Joussen <joussen@debian.org> Sat, 23 Mar 2002 02:50:51 +0100
+
+mdctl (0.5-4) unstable; urgency=low
+
+ * Added debconf template to ask the user if the init script links
+ should be updated.
+
+ -- Mario Joussen <joussen@debian.org> Mon, 4 Mar 2002 22:53:37 +0100
+
+mdctl (0.5-3) unstable; urgency=low
+
+ * Splitted up init script in two parts. One starts the md array and the
+ other starts the raid monitor daemon.
+ (closes: Bug#136184)
+
+ -- Mario Joussen <joussen@debian.org> Thu, 28 Feb 2002 22:45:57 +0100
+
+mdctl (0.5-2) unstable; urgency=low
+
+ * Included optimization in Makefile
+ (closes: Bug#127687)
+ * Removed Conflicts/Replaces/Provides: mdutils
+ (closes: Bug#127684, Bug#127719)
+ * Added an init script, which can start md arrays and the raid monitor
+ daemon
+ * MD devices are now created under /dev if necessary
+ * Added a sample configuration file
+
+ -- Mario Joussen <mario@joussen.org> Sun, 13 Jan 2002 23:43:40 +0100
+
+mdctl (0.5-1) unstable; urgency=low
+
+ * Initial Release.
+ (closes: Bug#126610)
+
+ -- Mario Joussen <mario@joussen.org> Wed, 26 Dec 2001 17:07:09 +0100
diff --git a/debian/checkarray b/debian/checkarray
new file mode 100644
index 00000000..f2c60c6e
--- /dev/null
+++ b/debian/checkarray
@@ -0,0 +1,120 @@
+#!/bin/sh -eu
+#
+# checkarray -- initiates a check run of a device's parity information.
+#
+# Copyright © 2006 martin f. krafft <madduck@debian.org>
+# distributed under the terms of the Artistic Licence.
+#
+REVISION=2006.07.07.1121
+
+# basename of the invocation; used as prefix in all messages
+PROGNAME=${0##*/}
+
+# print the program name, revision and copyright/licence banner to stdout
+about()
+{
+ echo "$PROGNAME -- RAID parity checker tool (revision $REVISION)"
+ echo "Copyright © 2006 martin f. krafft <madduck@debian.org>"
+ echo "Released under the terms of the Artistic Licence."
+}
+
+# print full usage information (options, examples, accepted device name
+# formats) to stdout
+usage()
+{
+ about
+ echo
+ echo "Usage: $PROGNAME [options] [devices]"
+ echo
+ echo "Valid options are:"
+ # column(1) aligns the option/description table; '&' is the separator
+ cat <<-_eof | column -s\& -t
+ -a|--all & check all assembled arrays (check /proc/mdstat).
+ -c|--cron & honour setting AUTOCHECK in /etc/default/mdadm.
+ -q|--quiet & suppress informational messages.
+ -Q|--real-quiet & suppress all output messages, including warnings and errors.
+ -h|--help & show this output.
+ -V|--version & show version information.
+ _eof
+ echo
+ echo "Examples:"
+ echo " $PROGNAME --all"
+ echo " $PROGNAME /dev/md[123]"
+ echo
+ echo "Devices can be specified in almost any format. The following are"
+ echo "all equivalent:"
+ echo " /dev/md0, md0, /dev/md/0, /sys/block/md0"
+ echo
+ echo "The --all option overrides all devices passed to the script."
+ echo
+ echo "You can control the status of a check with /proc/mdstat ."
+}
+
+SHORTOPTS=achVqQ
+LONGOPTS=all,cron,help,version,quiet,real-quiet
+
+# canonicalise the command line: getopt reorders options and inserts '--'
+eval set -- $(getopt -o $SHORTOPTS -l $LONGOPTS -n $PROGNAME -- "$@")
+
+devices=''
+cron=0
+all=0
+quiet=0
+
+# walk the canonicalised arguments; device arguments are normalised to
+# the bare mdX form used under /sys/block
+for opt in $@; do
+ case "$opt" in
+ -a|--all) all=1;;
+ -c|--cron) cron=1;;
+ -h|--help) usage; exit 0;;
+ -q|--quiet) quiet=1;;
+ -Q|--real-quiet) quiet=2;;
+ -V|--version) about; exit 0;;
+ /dev/md/*|md/*) devices="${devices:+$devices }md${opt#*md/}";;
+ /dev/md*|md*) devices="${devices:+$devices }${opt#/dev/}";;
+ /sys/block/md*) devices="${devices:+$devices }${opt#/sys/block/}";;
+ --) :;;
+ *) echo "$PROGNAME: E: invalid option: $opt" >&2; usage >&2; exit 1;;
+ esac
+done
+
+# return 0 (true) if $1 is an affirmative value (yes/y/1/true/t, any
+# case), 1 otherwise; used for settings read from /etc/default/mdadm
+is_true()
+{
+ case "${1:-}" in
+ [Yy]es|[Yy]|1|[Tt]rue|[Tt]) return 0;;
+ *) return 1;
+ esac
+}
+
+DEBIANCONFIG=/etc/default/mdadm
+[ -f $DEBIANCONFIG ] && . $DEBIANCONFIG
+# in --cron mode, honour the AUTOCHECK setting from /etc/default/mdadm
+if [ $cron = 1 ] && ! is_true ${AUTOCHECK:-false}; then
+ [ $quiet -lt 1 ] && echo "$PROGNAME: I: disabled in $DEBIANCONFIG ." >&2
+ exit 0
+fi
+
+# /proc/mdstat only exists when the md driver is loaded
+if [ ! -f /proc/mdstat ]; then
+ [ $quiet -lt 2 ] && echo "$PROGNAME: E: RAID subsystem not loaded, or /proc unavailable." >&2
+ exit 2
+fi
+
+# parity checking requires the sysfs sync_action interface
+if [ -z "$(ls /sys/block/md*/md/sync_action 2>/dev/null)" ]; then
+ [ $quiet -lt 2 ] && echo "$PROGNAME: E: no kernel support for parity checks." >&2
+ exit 3
+fi
+
+# --all overrides any devices given on the command line
+[ $all = 1 ] && devices="$(ls -d1 /sys/block/md* | cut -d/ -f4)"
+
+for dev in $devices; do
+ SYNC_ACTION_CTL=/sys/block/$dev/md/sync_action
+ if [ ! -w $SYNC_ACTION_CTL ]; then
+ [ $quiet -lt 2 ] && echo "$PROGNAME: E: $SYNC_ACTION_CTL not writeable." >&2
+ exit 4
+ fi
+
+ # only queue a check when the array is not already resyncing/checking
+ if [ "$(cat $SYNC_ACTION_CTL)" != idle ]; then
+ [ $quiet -lt 2 ] && echo "$PROGNAME: W: device $dev not idle, skipping..." >&2
+ continue
+ fi
+
+ # run check for the device. The kernel will make sure that these requests
+ # are properly queued so as to not kill one of the devices.
+ echo check > $SYNC_ACTION_CTL
+ [ $quiet -lt 1 ] && echo "$PROGNAME: I: check queued for device $dev." >&2
+
+done
+
+exit 0
diff --git a/debian/compat b/debian/compat
new file mode 100644
index 00000000..b8626c4c
--- /dev/null
+++ b/debian/compat
@@ -0,0 +1 @@
+4
diff --git a/debian/control b/debian/control
new file mode 100644
index 00000000..b10b8946
--- /dev/null
+++ b/debian/control
@@ -0,0 +1,28 @@
+Source: mdadm
+Section: admin
+Priority: optional
+Maintainer: Debian mdadm maintainers <pkg-mdadm-devel@lists.alioth.debian.org>
+Uploaders: martin f. krafft <madduck@debian.org>, Mario Joussen <joussen@debian.org>
+Build-Depends: debhelper (>= 4), po-debconf, dpatch, groff-base
+Standards-Version: 3.7.2
+
+Package: mdadm
+Architecture: any
+Depends: ${shlibs:Depends}, makedev, ${misc:Depends}, lsb-base (>= 3.1-6)
+Recommends: mail-transport-agent
+Replaces: mdctl
+Conflicts: mdctl (<< 0.7.2), raidtools2 (<< 1.00.3-12.1)
+Description: tool to administer Linux MD device arrays (software RAID)
+ mdadm is a program that can be used to create, manage, and monitor MD
+ devices (software RAID), as well as multipath devices.
+
+Package: mdadm-udeb
+Section: debian-installer
+XC-Package-Type: udeb
+Architecture: any
+Depends: ${shlibs:Depends}
+Description: tool to administer Linux MD device arrays (software RAID)
+ mdadm is a program that can be used to create, manage, and monitor MD
+ devices (software RAID), as well as multipath devices.
+ .
+ mdadm-udeb is a minimal package used by the debian-installer.
diff --git a/debian/copyright b/debian/copyright
new file mode 100644
index 00000000..f9977ed8
--- /dev/null
+++ b/debian/copyright
@@ -0,0 +1,21 @@
+This package was debianized by Mario Joußen <joussen@debian.org> on
+Wed, 26 Dec 2001 17:07:09 +0100.
+Martin F. Krafft <madduck@debian.org> took over on
+Tue, 16 May 2006 13:21:06 -0500
+
+The mdadm source was downloaded from
+ http://www.cse.unsw.edu.au/~neilb/source/mdadm/
+
+Upstream Author: Neil Brown <neilb@cse.unsw.edu.au>
+
+Copyright © 2001-2006 Neil Brown <neilb@cse.unsw.edu.au>
+Packaging copyright © 2001-2005 Mario Joußen <joussen@debian.org>
+Packaging copyright © 2005-2006 Martin F. Krafft <madduck@debian.org>
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+On Debian GNU/Linux systems, the complete text of the GNU General
+Public License can be found in '/usr/share/common-licenses/GPL'.
diff --git a/debian/docs b/debian/docs
new file mode 100644
index 00000000..e6afde71
--- /dev/null
+++ b/debian/docs
@@ -0,0 +1,2 @@
+TODO
+debian/rootraiddoc.97.html
diff --git a/debian/initramfs/hook b/debian/initramfs/hook
new file mode 100644
index 00000000..eefe1076
--- /dev/null
+++ b/debian/initramfs/hook
@@ -0,0 +1,103 @@
+#!/bin/sh -eu
+#
+# Copyright © 2006 Martin F. Krafft <madduck@debian.org>
+# based on the scripts in the initramfs-tools package.
+# released under the terms of the Artistic Licence.
+#
+
+PREREQ=""
+
+# initramfs-tools invokes every hook with 'prereqs' first to establish
+# the ordering; this hook has no prerequisites
+prereqs()
+{
+ echo "$PREREQ"
+}
+
+case ${1:-} in
+ prereqs)
+ prereqs
+ exit 0
+ ;;
+esac
+
+# defer to the old initramfs-tools md hook if it is still installed
+if [ -e /usr/share/initramfs-tools/hooks/md ]; then
+ echo "I: mdadm: initramfs md hook still present, I thus won't pop the trunk." >&2
+ echo "I: Please see /usr/share/doc/mdadm/README.experimental ." >&2
+ exit 0
+fi
+
+# likewise defer to a user-supplied md hook under /etc
+if [ -e /etc/initramfs-tools/hooks/md ]; then
+ echo "W: mdadm: I found /etc/initramfs-tools/hooks/md, which may conflict" >&2
+ echo "with this version of mdadm. Please see /usr/share/doc/mdadm/README.mdrun" >&2
+ echo "and /usr/share/initramfs-tools/hooks/mdadm for reference and update" >&2
+ echo "(or remove) the file." >&2
+ exit 0
+fi
+
+MDADM=$(command -v mdadm)
+[ -x $MDADM ] || exit 0
+
+[ -r /usr/share/initramfs-tools/hook-functions ] || exit 0
+. /usr/share/initramfs-tools/hook-functions
+
+INITRDSTART=all
+[ -s /etc/default/mdadm ] && . /etc/default/mdadm
+
+DEVS=''; LEVELS=''
+if [ "$INITRDSTART" != none ]; then
+ eval $(mdadm --examine --scan --config=partitions \
+ | while read a dev level num uuid; do
+
+ if [ "$INITRDSTART" = all ] || expr "$INITRDSTART" : ".*${dev}.*" >/dev/null; then
+ DEVS="$DEVS ${dev}:${uuid#UUID=}"
+ LEVELS="$LEVELS ${level#level=}"
+ fi
+
+ echo "DEVS='$DEVS'"
+ echo "LEVELS='$LEVELS'"
+ done)
+
+ [ "$INITRDSTART" = all ] && DEVS=all
+fi
+
+# emit each argument on its own line, sorted with duplicates removed
+uniquify()
+{
+ for arg in $@; do
+ echo "$arg"
+ done | sort -u
+}
+
+if [ -n "$DEVS" ]; then
+
+ # record the selection for the boot-time script; /conf/mdadm.conf is
+ # sourced by the local-top script inside the initramfs
+ touch $DESTDIR/conf/mdadm.conf
+ echo "RAID_DEVS='${DEVS## }'" >> $DESTDIR/conf/mdadm.conf
+
+ # 1:1 mapping between level names and modules
+ MODULES="$(uniquify $LEVELS | tr '\n' ' ')"
+ echo "RAID_MODULES='$MODULES'" >> $DESTDIR/conf/mdadm.conf
+
+ copy_exec $MDADM /sbin
+
+ # copy all modules into the initramfs, just for safety
+ MODULES="linear multipath raid0 raid1 raid5 raid6 raid10"
+ for mod in $MODULES; do manual_add_modules $mod; done
+
+ # prevent problems with old md script. Since this hook is only called if
+ # either this file does not exist or the user chose to try the new stuff,
+ # this is safe.
+ rm -f $DESTDIR/scripts/local-top/md
+
+ # build a human-readable summary of what will be assembled at boot
+ if [ "$DEVS" = all ]; then
+ DEVNAMES="all RAID arrays"
+ else
+ DEVNAMES="$(for i in $DEVS; do echo -n "${i%%:*},"; done)"
+ DEVNAMES="${DEVNAMES%,}"
+ fi
+
+ echo "I: mdadm: RAID support installed to mount $DEVNAMES during boot." >&2
+
+else
+ echo "I: mdadm: no RAID devices selected for initrd initialisation." >&2
+ echo "I: mdadm: not including RAID stuff." >&2
+fi
+
+echo 'I: mdadm: use `dpkg-reconfigure -plow mdadm` if this is not correct.' >&2
+
+exit 0
diff --git a/debian/initramfs/script.local-top b/debian/initramfs/script.local-top
new file mode 100644
index 00000000..aeeb28af
--- /dev/null
+++ b/debian/initramfs/script.local-top
@@ -0,0 +1,93 @@
+#!/bin/sh -eu
+#
+# Copyright © 2006 Martin F. Krafft <madduck@debian.org>
+# based on the scripts in the initramfs-tools package.
+# released under the terms of the Artistic Licence.
+#
+
+# run after the udev helper so device nodes exist when we assemble
+PREREQ="udev_helper"
+
+# initramfs-tools invokes every script with 'prereqs' first to establish
+# the execution order
+prereqs()
+{
+ echo "$PREREQ"
+}
+
+case ${1:-} in
+ prereqs)
+ prereqs
+ exit 0
+ ;;
+esac
+
+. /scripts/functions
+
+# the old md script from initramfs-tools conflicts with this one; refuse
+# to run alongside it
+if [ -e /scripts/local-top/md ]; then
+ log_failure_msg "old RAID initialisation script found, getting out of its way..."
+ exit 1
+fi
+
+# exit quietly when mdadm was not copied into the initramfs. '|| exit 0'
+# keeps a failing 'command -v' from aborting this sh -e script, and the
+# -x test is quoted so an empty $MDADM cannot make it vacuously true.
+MDADM=$(command -v mdadm) || exit 0
+[ -x "$MDADM" ] || exit 0
+
+# succeed (return 0) unless $quiet holds a truthy value (y/1/t, any
+# case). NOTE(review): $quiet is referenced unguarded despite sh -u, so
+# it is assumed to be set by the initramfs environment (e.g. from the
+# kernel command line) — confirm against /scripts/functions.
+verbose()
+{
+ case "$quiet" in y*|Y*|1|t*|T*)
+ return 1;;
+ *)
+ return 0;;
+ esac
+}
+
+# settings computed at initramfs build time by the mdadm hook
+[ -s /conf/mdadm.conf ] && . /conf/mdadm.conf
+
+# generate a config from the superblocks found on all partitions.
+# NOTE(review): the directory /etc/mdadm is created, but the file is
+# written to /etc/mdadm.conf — mdadm also reads that path, yet the
+# mismatch looks accidental; confirm.
+mkdir /etc/mdadm
+$MDADM --examine --scan --config=partitions > /etc/mdadm.conf
+
+# load the driver for every RAID level recorded by the hook
+verbose && log_begin_msg Loading RAID modules
+for module in ${RAID_MODULES:-}; do
+ if modprobe --syslog "$module"; then
+ verbose && log_success_msg "loaded module ${module}."
+ else
+ verbose && log_failure_msg "failed to load module ${module}."
+ fi
+done
+log_end_msg
+
+# without /proc/mdstat the md driver is absent; nothing can be assembled
+if [ ! -f /proc/mdstat ]; then
+ verbose && panic "cannot initialise RAID subsystem (/proc/mdstat missing)"
+ exit 1
+fi
+
+if [ ${RAID_DEVS:-all} = all ]; then
+
+ verbose && log_begin_msg "Assemblinging all RAID arrays"
+
+ mkdir /dev/md # busybox has devfs compat flag set, so expect /dev/md/X nodes
+ if $MDADM --assemble --config=/etc/mdadm.conf --scan --run --auto=yes; then
+ verbose && log_success_msg "success"
+ else
+ verbose && log_failure_msg "failed"
+ fi
+
+ verbose && log_end_msg
+
+else
+ for dev in ${RAID_DEVS:-}; do
+
+ DEV=${dev%%:*}
+ UUID=${dev#*:}
+
+ verbose && log_begin_msg "Assembling RAID array $DEV ($UUID)"
+
+ if $MDADM --assemble --config=partitions --run --auto=yes --uuid $UUID $DEV; then
+ verbose && log_success_msg "started $DEV ($UUID)"
+ else
+ verbose && log_failure_msg "failed to start $DEV ($UUID)"
+ fi
+
+ verbose && log_end_msg
+
+ done
+fi
+
+exit 0
diff --git a/debian/mdadm-raid b/debian/mdadm-raid
new file mode 100644
index 00000000..4616c2aa
--- /dev/null
+++ b/debian/mdadm-raid
@@ -0,0 +1,214 @@
+#!/bin/sh -eu
+#
+# Start all arrays specified in the configuration file.
+#
+# Copyright © 2001-2005 Mario Joußen <joussen@debian.org>
+# Copyright © 2005-2006 Martin F. Krafft <madduck@debian.org>
+# Distributable under the terms of the GNU GPL version 2.
+
+MDADM=/sbin/mdadm
+CONFIG=/etc/mdadm/mdadm.conf
+ALTCONFIG=/etc/mdadm.conf
+DEBIANCONFIG=/etc/default/mdadm
+RUNDIR=/var/run/mdadm
+# devices assembled by this script are recorded in this file
+AUTOSTARTED_DEVICES=$RUNDIR/autostarted-devices
+
+test -x $MDADM || exit 0
+
+# AUTOSTART may be overridden by /etc/default/mdadm
+AUTOSTART=true
+test -f $DEBIANCONFIG && . $DEBIANCONFIG
+
+. /lib/lsb/init-functions
+
+# reduce a device path to its basename and ensure it carries an md prefix
+short_dev()
+{
+ local name=${1##*/}
+ case "$name" in
+ md*|md_*|mdp*|mdp_*)
+ echo "$name"
+ ;;
+ *)
+ echo "md${name}"
+ ;;
+ esac
+}
+
+# log [<status>] <action> <message...>: emit an LSB begin/end message
+# pair. A leading numeric argument is consumed as the status passed to
+# log_action_end_msg (0 = success); otherwise the current value of the
+# global $success (or 0) is used.
+log()
+{
+ case "$1" in
+ [[:digit:]]*) success=$1; shift;;
+ *) :;;
+ esac
+ log_action_begin_msg "$1"; shift
+ log_action_end_msg ${success:-0} "$*"
+}
+
+# log_dev <status> <device> <message...>: log a per-device action result,
+# prefixing $PREFIX and shortening the device name via short_dev
+log_dev()
+{
+ success=${1:-}; shift
+ dev=${1:-}; shift
+ log $success "${PREFIX:-} $(short_dev ${dev:-})" "$*"
+}
+
+# log a success-status message about all arrays; the appended 's'
+# pluralises $PREFIX (e.g. "Assembling RAID array" -> "...arrays")
+log_notice()
+{
+ log 0 "${PREFIX:-}s" "$*"
+}
+
+# log a failure-status message about all arrays; the appended 's'
+# pluralises $PREFIX, as in log_notice
+log_problem()
+{
+ log 1 "${PREFIX:-}s" "$*"
+}
+
+# return 0 (true) if $1 is an affirmative value (yes/y/1/true/t, any
+# case), 1 otherwise; same helper as in checkarray
+is_true()
+{
+ case "${1:-}" in
+ [Yy]es|[Yy]|1|[Tt]rue|[Tt]) return 0;;
+ *) return 1;
+ esac
+}
+
+case "${1:-}" in
+ start)
+ PREFIX="Assembling RAID array"
+
+ if is_true $AUTOSTART; then
+ # try to load the md driver. The guard is quoted so a missing
+ # modprobe skips the attempt, and '|| :' keeps a failed modprobe
+ # from aborting this sh -e script — the /proc/mdstat check below
+ # is the authoritative verdict anyway.
+ if [ ! -f /proc/mdstat ] && [ -x "$(command -v modprobe)" ] ; then
+ modprobe -k md >/dev/null 2>&1 || :
+ fi
+ if [ ! -f /proc/mdstat ]; then
+ log_problem "failed to load RAID subsystem"
+ exit 0
+ fi
+
+ if [ -f $CONFIG ] || [ -f $ALTCONFIG ]; then
+ mkdir -p $RUNDIR
+
+ # parse mdadm's output line by line: temporarily set IFS to a
+ # newline so each message is one word of the for list
+ IFSOLD=${IFS:-}
+ IFS='
+'
+ for line in $($MDADM --assemble --scan --auto=yes 2>&1); do
+ IFS=$IFSOLD
+ set -- $line
+ shift
+
+ case "$@" in
+
+ 'No arrays found in config file')
+ # no point in carrying on.
+ shift
+ log_problem "no $*"
+ exit 0
+ ;;
+
+ 'Unknown keyword'*)
+ # warn only. Test for logger directly: the old
+ # '[ -x $(command -v logger >/dev/null) ]' always
+ # succeeded (the output was redirected away, leaving a
+ # bare '[ -x ]'), so the fallbacks were dead code.
+ if command -v logger >/dev/null 2>&1; then
+ logger -t mdadm -p syslog.warning -- "$*"
+ elif [ -w /dev/console ]; then
+ echo "mdadm: $*" > /dev/console
+ else
+ echo "mdadm: $*" >&2
+ fi
+ ;;
+
+ *' is already active.')
+ log_dev 0 $1 "already running"
+ ;;
+
+ *'has been started with '[[:digit:]]*' drives.')
+ log_dev 0 $1 "started [$6/$6]"
+ echo $1 >> $AUTOSTARTED_DEVICES
+ ;;
+
+ *'has been started with '[[:digit:]]*' drives (out of '[[:digit:]]*').')
+ log_dev 0 $1 "degraded [$6/${10%).}]"
+ echo $1 >> $AUTOSTARTED_DEVICES
+ ;;
+
+ *'assembled from '[[:digit:]]*' drive's#' - not enough to start the array.')
+ log_dev 1 $1 "not enough devices"
+ ;;
+
+ 'no devices found for '*)
+ log_dev 1 $5 "no devices found"
+ ;;
+
+ 'failed to RUN_ARRAY '*': Input/output error')
+ log_dev 1 ${4%:} "RUN_ARRAY input/output error"
+ ;;
+
+ *) :;;
+ esac
+ done || exit $?
+ else
+ log_problem "no $CONFIG file"
+ fi
+ else
+ log_notice "disabled in $DEBIANCONFIG"
+ fi
+ ;;
+
+ stop)
+ PREFIX="Stopping RAID array"
+
+ if is_true $AUTOSTART; then
+ if [ ! -f /proc/mdstat ]; then
+ log_problem "no RAID subsystem loaded"
+ exit 0
+ fi
+
+ # parse mdadm's output line by line: temporarily set IFS to a
+ # newline so each message is one word of the for list
+ IFSOLD=${IFS:-}
+ IFS='
+'
+ set +e
+ for line in $($MDADM --stop --scan 2>&1); do
+ set -e
+ IFS=$IFSOLD
+ set -- $line
+ shift
+
+ case "$@" in
+
+ 'Unknown keyword'*)
+ # warn only. Test for logger directly: the old
+ # '[ -x $(command -v logger >/dev/null) ]' always
+ # succeeded (the output was redirected away, leaving a
+ # bare '[ -x ]'), so the fallbacks were dead code.
+ if command -v logger >/dev/null 2>&1; then
+ logger -t mdadm -p syslog.warning -- "$*"
+ elif [ -w /dev/console ]; then
+ echo "mdadm: $*" > /dev/console
+ else
+ echo "mdadm: $*" >&2
+ fi
+ ;;
+
+ 'stopped '*)
+ log_dev 0 $2 stopped
+ ;;
+
+ 'fail to stop array '*': Device or resource busy')
+ log_dev 1 ${5%:} busy
+ ;;
+
+ *) :;;
+ esac
+ done || exit $?
+ else
+ log_notice "disabled in $DEBIANCONFIG"
+ fi
+ ;;
+
+ # restart is implemented as stop followed by start of this script
+ restart)
+ ${0:-} stop
+ ${0:-} start
+ ;;
+
+ # assembled arrays have nothing to reload
+ reload|force-reload)
+ PREFIX="Reloading RAID array"
+ log_notice "never anything to do"
+ ;;
+
+ *)
+ echo "Usage: ${0:-} {start|stop|restart}" >&2
+ exit 1;;
+
+esac
+
+exit 0
diff --git a/debian/mdadm-udeb.dirs b/debian/mdadm-udeb.dirs
new file mode 100644
index 00000000..a1e1b4bc
--- /dev/null
+++ b/debian/mdadm-udeb.dirs
@@ -0,0 +1,2 @@
+sbin
+usr/share/lintian/overrides
diff --git a/debian/mdadm.config b/debian/mdadm.config
new file mode 100644
index 00000000..7c15ac01
--- /dev/null
+++ b/debian/mdadm.config
@@ -0,0 +1,123 @@
+#!/bin/sh -eu
+# debconf config script for mdadm: seed the debconf database from the
+# existing configuration files, then ask the relevant questions.
+# Copyright © 2001-2004 Mario Joußen <joussen@debian.org>
+# Copyright © 2006 martin f. krafft <madduck@debian.org>
+# Distributable under the terms of the GNU GPL version 2.
+
+set +u # workaround for #369953
+. /usr/share/debconf/confmodule
+set -u
+
+CONFIG=/etc/mdadm/mdadm.conf
+ALTCONFIG=/etc/mdadm.conf
+# fall back to the alternative location if only that file exists
+[ ! -f $CONFIG ] && [ -f $ALTCONFIG ] && CONFIG=$ALTCONFIG
+
+DEBIANCONFIG=/etc/default/mdadm
+DEBIANCONFIG_OLD=/etc/mdadm/debian.conf
+
+# prefer /etc/default/mdadm, but read the old location if it is all we have
+if [ ! -e "$DEBIANCONFIG" ]; then
+ if [ -e "$DEBIANCONFIG_OLD" ]; then
+ DEBIANCONFIG="$DEBIANCONFIG_OLD"
+ fi
+fi
+
+INITRDSTART=''
+
+if [ -s $DEBIANCONFIG ] ; then
+ # defaults, possibly overridden by the file sourced below
+ AUTOSTART=true
+ AUTOCHECK=true
+ START_DAEMON=true
+ MAILADDR=root
+
+ [ -f $DEBIANCONFIG ] && . $DEBIANCONFIG
+ # a MAILADDR line in mdadm.conf overrides the default
+ if [ -f $CONFIG ]; then
+ MAILADDR=$(sed -rne 's,^MAILADDR[[:space:]]*([^[:space:]]+).*,\1,p' $CONFIG)
+ fi
+
+ # if $MAIL_TO from $DEBIANCONFIG is set, give it priority. This is because
+ # it was used in the command line invocation of the mdadm monitor, and thus
+ # overruled any existing setting in /etc/mdadm/mdadm.conf.
+ # There's a slight chance of loss of a configuration setting (MAILADDR in
+ # /etc/mdadm/mdadm.conf), but it's only an email address, and it is likely
+ # unused anyway, thus we run the risk.
+ [ -n "${MAIL_TO:-}" ] && MAILADDR="$MAIL_TO"
+
+ # push the harvested values into debconf so the questions default to
+ # the current settings
+ [ -n "$AUTOSTART" ] && db_set mdadm/autostart "$AUTOSTART"
+ [ -n "$AUTOCHECK" ] && db_set mdadm/autocheck "$AUTOCHECK"
+ [ -n "$START_DAEMON" ] && db_set mdadm/start_daemon "$START_DAEMON"
+ [ -n "$MAILADDR" ] && db_set mdadm/mail_to "$MAILADDR"
+fi
+
+db_input high mdadm/warning || true
+db_go
+
+# guess a sensible default for INITRDSTART: the root filesystem's array
+# when root is on an md device, otherwise 'all'
+if [ -z "$INITRDSTART" ]; then
+ ROOTRAIDDEV="$(df / | sed -ne 's,^\(/dev/[^[:space:]]\+\).*,\1,p')"
+ if ! mdadm --detail $ROOTRAIDDEV >/dev/null 2>&1; then
+ # you are using some funky setup. Let's be safe...
+ INITRDSTART=all
+ else
+ INITRDSTART="$ROOTRAIDDEV"
+ fi
+fi
+
+[ -n "$INITRDSTART" ] && db_set mdadm/initrdstart "$INITRDSTART"
+
+# re-ask until the answer is 'none', 'all', or a list of valid md devices
+while true; do
+ db_input low mdadm/initrdstart || true
+ db_go
+
+ db_get mdadm/initrdstart
+ INITRDSTART=$RET
+
+ case "$INITRDSTART" in
+ ''|none) INITRDSTART=none; break;;
+ all) break;;
+
+ /dev/md*|md*)
+ # validate every listed device: it must exist as a block device
+ ARRAYS=''
+ ERROR=0
+ for i in $INITRDSTART; do
+ case "$i" in
+ /dev/md*)
+ t=${i%,} # just in case people use commas between devices
+ [ -b "$t" ] && ARRAYS="${ARRAYS:+$ARRAYS }$t" && continue
+ echo "E: invalid device: $i" >&2
+ ERROR=1; break
+ ;;
+ md*)
+ t=/dev/${i%,} # just in case people use commas between devices
+ [ -b "$t" ] && ARRAYS="${ARRAYS:+$ARRAYS }$t" && continue
+ echo "E: invalid device: $i" >&2
+ ERROR=1; break
+ ;;
+ *)
+ echo "E: unrecognised device string: $i" >&2
+ ERROR=1; break
+ ;;
+ esac
+ done
+ [ $ERROR -eq 0 ] && INITRDSTART="$ARRAYS" && break
+ ;;
+
+ *) :;;
+ esac
+done
+
+db_set mdadm/initrdstart "$INITRDSTART"
+
+# only ask about autostart when the initramfs does not start everything
+if [ "$INITRDSTART" != all ]; then
+ db_input high mdadm/autostart || true
+ db_go
+fi
+
+db_input medium mdadm/autocheck || true
+db_go
+
+db_input medium mdadm/start_daemon || true
+db_go
+
+# the monitor's mail address only matters when the daemon will run
+db_get mdadm/start_daemon
+if [ "$RET" = true ]; then
+ db_input medium mdadm/mail_to || true
+ db_go
+fi
diff --git a/debian/mdadm.cron.d b/debian/mdadm.cron.d
new file mode 100644
index 00000000..c1956c91
--- /dev/null
+++ b/debian/mdadm.cron.d
@@ -0,0 +1,9 @@
+#
+# cron.d/mdadm -- schedules periodic parity checks of RAID devices
+#
+# Copyright © 2006 martin f. krafft <madduck@madduck.net>
+# distributed under the terms of the Artistic Licence.
+#
+
+# by default, run at 01:06 on the first Sunday of each month. cron ORs
+# the day-of-month and day-of-week fields when both are restricted, so
+# the previous '1-7 * 7' spec also ran on every Sunday and on each of the
+# month's first seven days; restrict day-of-week only and let the command
+# check the date instead.
+6 1 * * 0 root [ -x /usr/share/mdadm/checkarray ] && [ $(date +\%d) -le 7 ] && /usr/share/mdadm/checkarray --cron --all --quiet
diff --git a/debian/mdadm.dirs b/debian/mdadm.dirs
new file mode 100644
index 00000000..0a7797d6
--- /dev/null
+++ b/debian/mdadm.dirs
@@ -0,0 +1,7 @@
+sbin
+etc/mdadm
+usr/share/mdadm
+usr/share/initramfs-tools/hooks
+usr/share/initramfs-tools/scripts/local-top
+usr/share/bug/mdadm
+usr/share/lintian/overrides
diff --git a/debian/mdadm.docs b/debian/mdadm.docs
new file mode 100644
index 00000000..c2b5457c
--- /dev/null
+++ b/debian/mdadm.docs
@@ -0,0 +1,3 @@
+debian/README.recipes
+debian/README.mdrun
+md.txt
diff --git a/debian/mdadm.examples b/debian/mdadm.examples
new file mode 100644
index 00000000..a71fd894
--- /dev/null
+++ b/debian/mdadm.examples
@@ -0,0 +1,2 @@
+mdadm.conf-example
+misc/syslog-events
diff --git a/debian/mdadm.init b/debian/mdadm.init
new file mode 100644
index 00000000..409dac56
--- /dev/null
+++ b/debian/mdadm.init
@@ -0,0 +1,63 @@
+#!/bin/sh -eu
+#
+# Start the RAID monitor daemon for all active md arrays if desired.
+#
+# Copyright © 2001-2005 Mario Joußen <joussen@debian.org>
+# Copyright © 2005-2006 Martin F. Krafft <madduck@debian.org>
+# Distributable under the terms of the GNU GPL version 2.
+
+MDADM=/sbin/mdadm
+RUNDIR=/var/run/mdadm
+PIDFILE=$RUNDIR/monitor.pid
+DEBIANCONFIG=/etc/default/mdadm
+
+# nothing to do without mdadm or without the md driver loaded
+test -x $MDADM || exit 0
+
+test -f /proc/mdstat || exit 0
+
+# START_DAEMON may be overridden by /etc/default/mdadm
+START_DAEMON=true
+test -f $DEBIANCONFIG && . $DEBIANCONFIG
+
+. /lib/lsb/init-functions
+
+# return 0 (true) if $1 is an affirmative value (yes/y/1/t/true, any
+# case), 1 otherwise; used for the START_DAEMON setting
+is_true()
+{
+ case "${1:-}" in
+ [Yy]es|[Yy]|1|[Tt]|[Tt]rue) return 0;;
+ *) return 1;
+ esac
+}
+
+case "${1:-}" in
+ start)
+ if is_true $START_DAEMON; then
+ log_daemon_msg "Starting RAID monitoring service" "mdadm --monitor"
+ mkdir -p $RUNDIR
+ set +e
+ start-stop-daemon -S -p $PIDFILE -x $MDADM -- \
+ --monitor --pid-file $PIDFILE --daemonise --scan
+ log_end_msg $?
+ set -e
+ fi
+ ;;
+ stop)
+ if [ -f $PIDFILE ] ; then
+ log_daemon_msg "Stopping RAID monitoring service" "mdadm --monitor"
+ set +e
+ start-stop-daemon -K -p $PIDFILE -x $MDADM
+ rm -f $PIDFILE
+ log_end_msg $?
+ set -e
+ fi
+ ;;
+ restart|reload|force-reload)
+ ${0:-} stop
+ ${0:-} start
+ ;;
+ *)
+ echo "Usage: ${0:-} {start|stop|restart|reload|force-reload}" >&2
+ exit 1
+ ;;
+esac
+
+exit 0
diff --git a/debian/mdadm.lintian-overrides b/debian/mdadm.lintian-overrides
new file mode 100644
index 00000000..3114a0dd
--- /dev/null
+++ b/debian/mdadm.lintian-overrides
@@ -0,0 +1 @@
+mdadm: virtual-package-depends-without-real-package-depends recommends: mail-transport-agent
diff --git a/debian/mdadm.manpages b/debian/mdadm.manpages
new file mode 100644
index 00000000..4d140821
--- /dev/null
+++ b/debian/mdadm.manpages
@@ -0,0 +1,4 @@
+mdadm.8
+md.4
+mdadm.conf.5
+debian/mdrun.8
diff --git a/debian/mdadm.postinst b/debian/mdadm.postinst
new file mode 100644
index 00000000..3743bfc5
--- /dev/null
+++ b/debian/mdadm.postinst
@@ -0,0 +1,137 @@
+#!/bin/sh -eu
+# Copyright © 2001-2005 Mario Jou/3en <joussen@debian.org>
+# Copyright © 2005-2006 Martin F. Krafft <madduck@debian.org>
+# Distributable under the terms of the GNU GPL version 2.
+
+if [ "${1:-}" = "configure" ]; then
+
+ set +u # debconf workaround. See #369953
+ . /usr/share/debconf/confmodule
+ set -u
+
+ if [ ! -f /proc/mdstat ] && [ -x $(command -v modprobe) ] ; then
+ modprobe -k md >/dev/null 2>&1 || :
+ fi
+ if [ ! -f /proc/mdstat ]; then
+ echo 'W: failed to load RAID subsystem.' >&2
+ fi
+
+ if [ ! -e /dev/md15 ] \
+ && [ ! -e /dev/.static/dev/md15 ] \
+ && [ ! -e /dev/.devfsd ]; then
+
+ echo -n 'Generating device nodes... ' >&2
+ cd /dev
+ if /sbin/MAKEDEV md >&2 >/dev/null; then
+ echo 'done.' >&2
+ else
+ echo 'failed.' >&2
+ fi
+ fi
+
+ DEBIANCONFIG=/etc/default/mdadm
+ CONFIG=/etc/mdadm/mdadm.conf
+ ALTCONFIG=/etc/mdadm.conf
+ MDADM=/sbin/mdadm
+
+ if [ -z "$2" ] && [ -s /etc/mdctl/mdctl.conf ] && [ ! -f /etc/mdadm/mdadm.conf ] ; then
+ cp -a /etc/mdctl/mdctl.conf /etc/mdadm/mdadm.conf
+ fi
+
+ if [ ! -f $CONFIG ] && [ ! -f $ALTCONFIG ]; then
+ echo -n 'Generating mdadm.conf... ' >&2
+ if /usr/share/mdadm/mkconf >$CONFIG 2>/dev/null; then
+ mkdir -p /var/lib/mdadm
+ md5sum $CONFIG > /var/lib/mdadm/mdadm.conf-generated
+ echo done. >&2
+ else
+ echo "failed (/proc probably not mounted)." >&2
+ rm -f $CONFIG
+ fi
+ fi
+
+ [ ! -f $CONFIG ] && CONFIG=$ALTCONFIG
+
+ db_get mdadm/initrdstart
+ INITRDSTART="${RET:-all}"
+ db_get mdadm/autostart
+ AUTOSTART="${RET:-true}"
+ db_get mdadm/autocheck
+ AUTOCHECK="${RET:-true}"
+ db_get mdadm/start_daemon
+ START_DAEMON="${RET:-true}"
+ db_get mdadm/mail_to
+ MAILADDR="${RET:-root}"
+
+ cat <<_eof > $DEBIANCONFIG
+# mdadm Debian configuration
+#
+# You can run 'dpkg-reconfigure mdadm' to modify the values in this file, if
+# you want. You can also edit things here and changes will be preserved.
+#
+
+# INITRDSTART:
+# list of devices (or 'all') to start automatically when the initial ramdisk
+# loads. This list *must* include the device of your root filesystem.
+INITRDSTART='$INITRDSTART'
+
+# AUTOSTART:
+# should mdadm start devices listed in /etc/mdadm/mdadm.conf automatically
+# during boot?
+AUTOSTART=$AUTOSTART
+
+# AUTOCHECK:
+# should mdadm run periodic parity checks over your arrays? See
+# /etc/cron.d/mdadm.
+AUTOCHECK=$AUTOCHECK
+
+# START_DAEMON:
+# should mdadm start the RAID monitoring daemon during boot?
+START_DAEMON=$START_DAEMON
+
+# MAIL_TO
+# this variable is now managed in /etc/mdadm/mdadm.conf (MAILADDR).
+# Please see mdadm.conf(5).
+
+# USE_DEPRECATED_MDRUN=
+# mdrun is deprecated. If you still want to use it without a warning, set
+# the following to true.
+#
+# NOTE: changes to this variable will not be preserved!
+#
+USE_DEPRECATED_MDRUN=false
+_eof
+
+ if grep -q '^MAILADDR' $CONFIG; then
+ sed -i -e "s,^MAILADDR.*,MAILADDR $MAILADDR," $CONFIG
+ else
+ echo "MAILADDR $MAILADDR" >> $CONFIG
+ fi
+
+ db_stop
+
+ command -v update-initramfs >/dev/null && update-initramfs -u
+fi
+
+# remove S04 installed as a fix to #294404 as it does not work
+# we only remove the startup links if the previous version was 1.9.0-2.1 (the
+# one with the erroneous fix) and the S04 link exists to make sure that
+# chances are minimised to overwrite admin changes (even though this would
+# not affect woody upgraders)
+if [ "${1:-}" = "configure" ] && [ "${2:-}" = "1.9.0-2.1" ] \
+ && test -L /etc/rcS.d/S04mdadm-raid;
+then
+ update-rc.d -f mdadm-raid remove
+fi
+
+#DEBHELPER#
+
+# do start mdadm-raid, but don't use dh_installinit to do so, to prevent
+# stopping raids on remove/upgrade.
+if [ -x /etc/init.d/mdadm-raid ]; then
+ if [ -x "$(command -v invoke-rc.d)" ]; then
+ invoke-rc.d mdadm-raid start || exit $?
+ else
+ /etc/init.d/mdadm-raid start || exit $?
+ fi
+fi
diff --git a/debian/mdadm.postrm b/debian/mdadm.postrm
new file mode 100644
index 00000000..84a7f141
--- /dev/null
+++ b/debian/mdadm.postrm
@@ -0,0 +1,30 @@
+#! /bin/sh -eu
+# Copyright © 2001,2002 Mario Jou/3en <joussen@debian.org>
+# Copyright © 2006 Martin F. Krafft <madduck@debian.org>
+# Distributable under the terms of the GNU GPL version 2.
+
+case "${1:-}" in
+ remove)
+ if command -v update-initramfs >/dev/null; then
+ echo "W: mdadm: I'll update the initramfs, but if you need RAID to boot" >&2
+ echo "W: mdadm: with initramfs, you'll be screwed!" >&2
+ update-initramfs -u
+ fi
+ ;;
+
+ purge)
+ rm -f /etc/default/mdadm
+ if [ -f /var/lib/mdadm/mdadm.conf-generated ]; then
+ rm -f /etc/mdadm/mdadm.conf
+ fi
+ rm -rf /var/lib/mdadm
+ ;;
+
+ *) :;;
+esac
+
+# just in case somebody actually purges mdadm, we need this to make debconf
+# (added by dh_installdebconf) behave. See #369953
+set +u
+
+#DEBHELPER#
diff --git a/debian/mdadm.preinst b/debian/mdadm.preinst
new file mode 100644
index 00000000..9cffd3da
--- /dev/null
+++ b/debian/mdadm.preinst
@@ -0,0 +1,12 @@
+#!/bin/sh -eu
+# Copyright © 2006 martin f. krafft <madduck@debian.org>
+# Distributed under the terms of the Artistic Licence.
+
+# migrate old configuration from *way back then*
+DEBIANCONFIG=/etc/default/mdadm
+OLDCONFIG=/etc/mdadm/debian.conf
+if [ -s $OLDCONFIG ] && [ ! -f $DEBIANCONFIG ]; then
+ mv $OLDCONFIG $DEBIANCONFIG
+fi
+
+#DEBHELPER#
diff --git a/debian/mdadm.templates b/debian/mdadm.templates
new file mode 100644
index 00000000..071f7063
--- /dev/null
+++ b/debian/mdadm.templates
@@ -0,0 +1,71 @@
+Template: mdadm/warning
+Type: note
+_Description: Initialise the superblock if you reuse hard disks
+ WARNING! If you are using hard disks which have RAID superblocks from earlier
+ installations in different RAID arrays, you MUST zero each superblock
+ *before* activating the autostart feature.
+ .
+ To do this, do not start the RAID devices automatically. First, zero the
+ superblock (mdadm --zero-superblock /dev/mdX). Next, use `dpkg-reconfigure
+ mdadm` to reactivate the autostart feature.
+ .
+ If you manage your RAIDs otherwise (e.g. EVMS), either disable autostart, or
+ ensure that /etc/mdadm/mdadm.conf only lists those arrays you want to start
+ by mdadm.
+
+Template: mdadm/initrdstart
+Type: string
+Default: all
+_Description: RAID arrays needed for the root filesystem:
+ If your system has its root filesystem on a RAID volume, it needs to be
+ started early during the boot sequence. If your root filesystem is on
+ a logical volume (LVM), which is on RAID, all constituent arrays need to be
+ started.
+ .
+ If you know exactly which arrays are needed to bring up the root filesystem,
+ enter them here. Alternatively, enter 'all' to simply start all available
+ arrays. If you do not need or want to start any arrays for the root
+ filesystem, leave the answer blank (or enter 'none').
+ .
+ You have the option to start all other arrays (those not needed for the root
+ filesystem) later in the boot sequence. Doing so will give you greater
+ control over the arrays with the mdadm configuration file. Starting all
+ arrays at boot-time may be safer though.
+ .
+ Please enter a space-separated list of devices, or 'all'. You may omit the
+ leading '/dev/' and just enter e.g. "md0 md1".
+
+Template: mdadm/autostart
+Type: boolean
+Default: true
+_Description: Do you want to start RAID devices automatically?
+ Once the base system has come up, mdadm can start all RAID devices specified
+ in /etc/mdadm/mdadm.conf, which have not yet been started. Unless you have
+ compiled RAID support into the kernel (in which case all RAID arrays with
+ partitions of type 0xfd (and only those) are started automatically anyway),
+ this is probably what you want.
+
+Template: mdadm/autocheck
+Type: boolean
+Default: true
+_Description: Should mdadm run monthly parity checks of the RAID arrays?
+ If your kernel supports it (>> 2.6.14), mdadm can periodically check the
+ parity of your RAID devices. This may be a resource-intensive process,
+ depending on your setup, but it could help prevent rare cases of data loss.
+ .
+ The default, if turned on, is to run the checks on the first Sunday of every
+ month at 01:06 o'clock.
+
+Template: mdadm/start_daemon
+Type: boolean
+Default: true
+_Description: Do you want to start the RAID monitoring daemon?
+ The RAID monitor daemon sends email notifications in response to important
+ RAID events (such as a disk failure). You probably want to enable it.
+
+Template: mdadm/mail_to
+Type: string
+Default: root
+_Description: Recipient for email notifications:
+ Please enter the email address of the user who should get the email
+ notification for these important RAID events.
diff --git a/debian/mdrun b/debian/mdrun
new file mode 100644
index 00000000..cb63ff3d
--- /dev/null
+++ b/debian/mdrun
@@ -0,0 +1,189 @@
+#!/bin/sh
+
+# mdrun, (c) Eduard Bloch <blade@debian.org> 2003
+
+# Usage:
+# Without arguments: autodetect all RAID partitions and activate MDs
+# Arguments: [ DEVDIR ] NUMBER UUID [ <NUMBER UUID> ... ]
+# a number of number/uuid pairs, where NUMBER is the one from /dev/md/*
+# Argument: LIST
+# lists all raids in the syntax needed for the pairs (see above)
+
+# IMPORTANT: create /dev/fs directory if you have devfs support in the kernel
+# but do not want to mount it over /dev. Usage of /dev/fs directory will keep
+# mdrun away from /dev.
+
+# If the first argument is a directory, it will be used as a writeable
+# temporary directory for device nodes. mdrun needs mknod to create them
+# on-the-fly
+
+# Environment:
+# MORERAIDVOLUMES (list of strings) : additional raid disks to scan,
+# eg. loop devices
+
+is_true()
+{
+ case "$1" in
+ [Yy]es|[Yy]|1|[Tt]rue) return 0;;
+ *) return 1;
+ esac
+}
+
+warn()
+{
+ echo "$@" >&2
+}
+
+[ -f /etc/default/mdadm ] && . /etc/default/mdadm
+# disabled until the transition is complete
+if false && ! is_true $USE_DEPRECATED_MDRUN; then
+ warn
+ warn =========================
+ warn mdrun deprecation warning
+ warn =========================
+ warn
+ warn If you are seeing this during boot, please upgrade to a newer
+ warn version of the initramfs-tools package and ignore the rest of
+ warn this message.
+ warn
+ warn You are running $0, or your system is not properly configured
+ warn such that $0 is used as a fallback. Due to technical limitations,
+ warn $0 is deprecated and will be removed in a future release of mdadm.
+ warn Please see /usr/share/doc/mdadm/README.mdrun for more info.
+ warn
+
+ TIMEOUT=15
+ if [ -r /dev/stdin ]; then
+ # cannot use -t or -n, which is not POSIX
+ read -p'Please hit return to continue...' resp < /dev/stdin >&2
+ warn
+ else
+ warn "Pausing for $TIMEOUT seconds..." >&2
+ sleep $TIMEOUT
+ fi
+fi
+
+if ! test -e /proc/partitions ; then
+ echo "/proc not mounted!"
+ exit 1
+fi
+
+DEVDIR=/dev
+
+if [ -d "$1" ] ; then
+ AUTOCREATE=true
+ DEVDIR="$1"
+ shift
+fi
+
+# For people that compile the kernel with devfs (means: different
+# proc/partitions content), but without auto-mounting it
+if ! uname -r | grep "^2.6" 1>/dev/null && [ -z "$AUTOCREATE" ] && grep " devfs" /proc/filesystems >/dev/null 2>&1 && ! grep "^devfs" /proc/mounts >/dev/null 2>&1 ; then
+
+ mkdir /dev/fs 2>/dev/null
+ # if we can do it - good, we will use it. Otherwise, use /dev even if it is ugly
+
+ # mount devfs for now to make the device names match, umount later
+ if [ -d /dev/fs ] ; then
+ DEVDIR=/dev/fs
+ fi
+ mount none $DEVDIR -tdevfs
+ UMNTDEVFS="umount $DEVDIR"
+fi
+
+# arr(array, index): return contents in array[index]; as with Bourne shell
+# in general, there is no easy way to distinguish between index not
+# existing and empty string assigned.
+arr() { sa_i=`arr_index $2`; eval "echo \"\$$1_${sa_i}\""; unset sa_i; }
+
+# seterr(array, index, value): assign the given value to array[index].
+setarr() { sa_i=`arr_index $2`; eval "$1_${sa_i}=\"$3\""; unset sa_i; }
+
+# arr_index(index): make sure the given index is valid for use.
+arr_index() { echo $1 | sed -e 's/:/_/g' | sed 's;/;_;g'; }
+
+
+BASE=$DEVDIR/md
+export BASE
+#devfs
+test -d $BASE && BASE=$BASE/
+
+next_free_md() {
+ for raidnr in 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24; do
+ if ! mdadm -D $BASE$raidnr >/dev/null 2>&1 ; then
+ echo $BASE$raidnr
+ return 0
+ fi
+ done
+ return 1
+}
+
+listpairs() {
+ for NUMBER in `cat /proc/mdstat | grep "^md. : active" | sed -e 's/^md\(.\) :.*/\1/'`; do
+ echo $NUMBER
+ mdadm -D ${BASE}$NUMBER 2>/dev/null |grep UUID | sed 's/.*: \(.*\)/\1/'
+ done
+}
+
+if [ "$1" = LIST ] ; then
+ echo `listpairs`
+ $UMNTDEVFS
+ exit 0
+fi
+
+DEVDIRESC=$(echo $DEVDIR | sed -e 's!/!\\/!g')
+if [ "$AUTOCREATE" ] ; then
+ CREATECMD=$(sed -e "s/.*major.*//; s/.*\ \([:0-9:]\+\)\ \+\ \([:0-9:]\+\)\ \+\ [:0-9:]\+\ \+\([:a-z0-9\/:]\+\).*/mknod \3 b \1 \2 ; / ; s/\//_/g" < /proc/partitions)
+ export CREATECMD
+ export DEVDIR
+ # and we need array nodes, of course
+ (
+ cd $DEVDIR ;
+ eval $CREATECMD ;
+ for x in 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 ; do
+ mknod ${BASE}$x b 9 $x
+ done
+ )
+ PARTLIST=$(sed -e "s/.*major.*//; s/.*\ \([:0-9:]\+\)\ \+\ \([:0-9:]\+\)\ \+\ [:0-9:]\+\ \+\([:a-z0-9\/:]\+\).*/DEVDIR\3 /; s/\//_/g ; s/DEVDIR/$DEVDIRESC\//;" < /proc/partitions)
+else
+ PARTLIST=$(sed -e "s/.*major.*//; s/^[:0-9 :]* \([:a-z:].[:a-z0-9\/:]*\).*/\1/; s/^\([:a-z:].*\)/$DEVDIRESC\/\1/g" < /proc/partitions)
+fi
+
+for SRC in $PARTLIST $MORERAIDVOLUMES ; do
+ SUM=$(mdadm -E $SRC 2>/dev/null | grep UUID | sed 's/.*: \(.*\)/\1/')
+ for x in $SUM; do
+ UUIDS="$UUIDS $SUM"
+ setarr MDS $SUM "`arr MDS $SUM` $SRC"
+ done
+done
+
+if [ "$#" -gt 1 ] ; then
+ NUMBER=${BASE}$1
+ MD=$2
+ shift ; shift
+ if [ "`arr MDS $MD`" != "started" ] ; then
+ mdadm -A -a yes $NUMBER -f `arr MDS $MD` && setarr MDS $MD "started"
+ # just to be sure
+ ln /dev/md/$NUMBER /dev/md$NUMBER 2>/dev/null
+ fi
+fi
+
+# and process the rest, if it exists
+# do not touch active arrays
+#dropactive() {
+ for NUMBER in `cat /proc/mdstat | grep "^md. : active" | sed -e 's/^md\(.\) :.*/\1/'`; do
+ setarr MDS `mdadm -D ${BASE}$NUMBER 2>/dev/null |grep UUID | sed 's/.*: \(.*\)/\1/'` "started"
+ done
+#}
+
+
+for MD in $UUIDS; do
+ if [ "`arr MDS $MD`" != "started" ] ; then
+ NUMBER=`next_free_md`
+ mdadm -A -a yes $NUMBER -f `arr MDS $MD` && setarr MDS $MD "started"
+ # just to be sure
+ ln /dev/md/$NUMBER /dev/md$NUMBER 2>/dev/null
+ fi
+done
+
+$UMNTDEVFS
diff --git a/debian/mdrun.8 b/debian/mdrun.8
new file mode 100644
index 00000000..5d405c90
--- /dev/null
+++ b/debian/mdrun.8
@@ -0,0 +1,65 @@
+.\"
+.\" This man page written Aug 13 2004 by Robert Collins <robertc@robertcollins.net>
+.\" This template provided by Tom Christiansen <tchrist@jhereg.perl.com>.
+.\"
+.TH MDRUN 8
+.SH NAME
+mdrun \- autodetect and activate all raid partitions
+.SH SYNOPSIS
+mdrun detects all (or named) raid partitions and activates them.
+
+Without arguments: autodetect all RAID partitions and activate MDs.
+.PP
+.B mdrun
+.B LIST
+.PP
+.B mdrun
+[
+.I DEVDIR
+]
+.I NUMBER UUID
+[
+.I NUMBER UUID ...
+]
+.SH WARNING
+mdrun has been deprecated on Debian. Please use
+.B mdadm
+instead. You can find information on how to use and configure
+.B mdadm
+in the
+.BR mdadm(8)
+and
+.BR mdadm.conf(5)
+manpages.
+.SH OPTIONS
+.TP
+.B LIST
+lists all raids in the syntax needed for the number uuid pairs the alternative syntax accepts.
+.TP
+.I DEVDIR
+An optional override for the /dev directory.
+.TP
+.I NUMBER UUID
+pairs of number, uuids that define a md element to run. number comes from /dev/md/*.
+.SH "RETURN VALUE"
+Nothing is displayed and mdrun returns 0 on success.
+.SH ERRORS
+On error, non-zero is returned, and a diagnostic string is printed.
+.SH EXAMPLES
+mdrun.
+.SH ENVIRONMENT
+MORERAIDVOLUMES (list of strings) : additional raid disks to scan, eg. loop devices.
+.SH NOTES
+If the first argument is a directory, it will be used as a writeable temporary directory for device nodes.
+mdrun needs mknod to create them on-the-fly.
+.SH CAVEATS
+Create /dev/fs directory if you have devfs support in the kernel but do not want to mount it over /dev.
+Usage of /dev/fs directory will keep mdrun away from /dev.
+.SH AUTHOR
+Eduard Bloch <blade@debian.org>.
+.SH HISTORY
+This man page was created Aug 13 2004 by Robert Collins.
+.SH "SEE ALSO"
+.BR mdadm (8).
+
+
diff --git a/debian/mkconf b/debian/mkconf
new file mode 100644
index 00000000..ab73898f
--- /dev/null
+++ b/debian/mkconf
@@ -0,0 +1,30 @@
+#!/bin/sh -eu
+#
+# mkconf -- outputs valid mdadm.conf contents for the local system
+#
+# Copyright © 2006 martin f. krafft <madduck@madduck.net>
+# distributed under the terms of the Artistic Licence.
+#
+
+MDADM=/sbin/mdadm
+DEBIANCONFIG=/etc/default/mdadm
+
+test -f $DEBIANCONFIG && . $DEBIANCONFIG
+
+if [ -r /proc/partitions ]; then
+ echo DEVICE partitions
+else
+ echo E: cannot read /proc/partitions and thus cannot do my magic. >&2
+ exit 1
+fi
+
+echo CREATE owner=root group=disk mode=0660 auto=yes metadata=1
+
+if [ ! -r /proc/mdstat ]; then
+ echo W: RAID subsystem is not loaded, thus I cannot scan for arrays. >&2
+else
+ # || : is workaround for #367901
+ $MDADM --examine --scan --config=partitions || :
+fi
+
+exit 0
diff --git a/debian/patches/00list b/debian/patches/00list
new file mode 100644
index 00000000..5e6ff3b5
--- /dev/null
+++ b/debian/patches/00list
@@ -0,0 +1,2 @@
+01-mdadm.conf-location
+99-md.txt
diff --git a/debian/patches/01-mdadm.conf-location.dpatch b/debian/patches/01-mdadm.conf-location.dpatch
new file mode 100755
index 00000000..095ad5ff
--- /dev/null
+++ b/debian/patches/01-mdadm.conf-location.dpatch
@@ -0,0 +1,117 @@
+#! /bin/sh /usr/share/dpatch/dpatch-run
+## 01-mdadm.conf-location.dpatch by martin f. krafft <madduck@debian.org>
+##
+## All lines beginning with `## DP:' are a description of the patch.
+## DP: Reverses conffile locations to favour /etc/mdadm/mdadm.conf
+
+@DPATCH@
+diff -urNad mdadm-2.5.1~/Makefile mdadm-2.5.1/Makefile
+--- mdadm-2.5.1~/Makefile 2006-06-26 00:06:29.000000000 +0200
++++ mdadm-2.5.1/Makefile 2006-06-26 00:06:45.936514898 +0200
+@@ -48,8 +48,8 @@
+ endif
+
+ SYSCONFDIR = /etc
+-CONFFILE = $(SYSCONFDIR)/mdadm.conf
+-CONFFILE2 = $(SYSCONFDIR)/mdadm/mdadm.conf
++CONFFILE = $(SYSCONFDIR)/mdadm/mdadm.conf
++CONFFILE2 = $(SYSCONFDIR)/mdadm.conf
+ MAILCMD =/usr/sbin/sendmail -t
+ CONFFILEFLAGS = -DCONFFILE=\"$(CONFFILE)\" -DCONFFILE2=\"$(CONFFILE2)\"
+ CFLAGS = $(CWFLAGS) $(CXFLAGS) -DSendmail=\""$(MAILCMD)"\" $(CONFFILEFLAGS)
+diff -urNad mdadm-2.5.1~/ReadMe.c mdadm-2.5.1/ReadMe.c
+--- mdadm-2.5.1~/ReadMe.c 2006-06-25 23:52:13.000000000 +0200
++++ mdadm-2.5.1/ReadMe.c 2006-06-26 00:06:34.108601730 +0200
+@@ -504,7 +504,7 @@
+
+
+ char Help_config[] =
+-"The /etc/mdadm.conf config file:\n\n"
++"The /etc/mdadm/mdadm.conf config file:\n\n"
+ " The config file contains, apart from blank lines and comment lines that\n"
+ " start with a hash(#), four sorts of configuration lines: array lines, \n"
+ " device lines, mailaddr lines and program lines.\n"
+diff -urNad mdadm-2.5.1~/mdadm.8 mdadm-2.5.1/mdadm.8
+--- mdadm-2.5.1~/mdadm.8 2006-06-25 23:52:13.000000000 +0200
++++ mdadm-2.5.1/mdadm.8 2006-06-26 00:06:34.116601671 +0200
+@@ -225,9 +225,9 @@
+ .TP
+ .BR -c ", " --config=
+ Specify the config file. Default is to use
+-.BR /etc/mdadm.conf ,
++.BR /etc/mdadm/mdadm.conf ,
+ or if that is missing, then
+-.BR /etc/mdadm/mdadm.conf .
++.BR /etc/mdadm.conf .
+ If the config file given is
+ .B partitions
+ then nothing will be read, but
+@@ -253,7 +253,7 @@
+ permission to get any missing information, like component devices,
+ array devices, array identities, and alert destination from the
+ configuration file:
+-.BR /etc/mdadm.conf .
++.BR /etc/mdadm/mdadm.conf .
+ One exception is MISC mode when using
+ .B --detail
+ or
+@@ -985,7 +985,7 @@
+ or requested with (a possibly implicit)
+ .B --scan.
+ In the later case,
+-.B /etc/mdadm.conf
++.B /etc/mdadm/mdadm.conf
+ is used.
+
+ If
+@@ -1260,7 +1260,7 @@
+ .B --scan
+ will cause the output to be less detailed and the format to be
+ suitable for inclusion in
+-.BR /etc/mdadm.conf .
++.BR /etc/mdadm/mdadm.conf .
+ The exit status of
+ .I mdadm
+ will normally be 0 unless
+@@ -1296,7 +1296,7 @@
+ then multiple devices that are components of the one array
+ are grouped together and reported in a single entry suitable
+ for inclusion in
+-.BR /etc/mdadm.conf .
++.BR /etc/mdadm/mdadm.conf .
+
+ Having
+ .B --scan
+@@ -1706,7 +1706,7 @@
+ on Monitor mode.
+
+
+-.SS /etc/mdadm.conf
++.SS /etc/mdadm/mdadm.conf
+
+ The config file lists which devices may be scanned to see if
+ they contain MD super block, and gives identifying information
+diff -urNad mdadm-2.5.1~/mdadm.conf.5 mdadm-2.5.1/mdadm.conf.5
+--- mdadm-2.5.1~/mdadm.conf.5 2006-06-25 23:52:13.000000000 +0200
++++ mdadm-2.5.1/mdadm.conf.5 2006-06-26 00:06:34.116601671 +0200
+@@ -2,7 +2,7 @@
+ .SH NAME
+ mdadm.conf \- configuration for management of Software Raid with mdadm
+ .SH SYNOPSIS
+-/etc/mdadm.conf
++/etc/mdadm/mdadm.conf
+ .SH DESCRIPTION
+ .PP
+ .B mdadm
+diff -urNad mdadm-2.5.1~/mdassemble.8 mdadm-2.5.1/mdassemble.8
+--- mdadm-2.5.1~/mdassemble.8 2006-06-25 23:52:13.000000000 +0200
++++ mdadm-2.5.1/mdassemble.8 2006-06-26 00:06:34.116601671 +0200
+@@ -33,7 +33,7 @@
+
+ .SH FILES
+
+-.SS /etc/mdadm.conf
++.SS /etc/mdadm/mdadm.conf
+
+ The config file lists which devices may be scanned to see if
+ they contain MD super block, and gives identifying information
diff --git a/debian/patches/99-md.txt.dpatch b/debian/patches/99-md.txt.dpatch
new file mode 100755
index 00000000..eb56ba83
--- /dev/null
+++ b/debian/patches/99-md.txt.dpatch
@@ -0,0 +1,368 @@
+#! /bin/sh /usr/share/dpatch/dpatch-run
+## 99-md.txt.dpatch by martin f. krafft <madduck@debian.org>
+##
+## All lines beginning with `## DP:' are a description of the patch.
+## DP: No description.
+
+@DPATCH@
+diff -urNad mdadm-2.5.2~/md.txt mdadm-2.5.2/md.txt
+--- mdadm-2.5.2~/md.txt 1970-01-01 01:00:00.000000000 +0100
++++ mdadm-2.5.2/md.txt 2006-07-06 18:28:20.213989423 +0200
+@@ -0,0 +1,357 @@
++Tools that manage md devices can be found at
++ http://www.<country>.kernel.org/pub/linux/utils/raid/....
++
++
++Boot time assembly of RAID arrays
++---------------------------------
++
++You can boot with your md device with the following kernel command
++lines:
++
++for old raid arrays without persistent superblocks:
++ md=<md device no.>,<raid level>,<chunk size factor>,<fault level>,dev0,dev1,...,devn
++
++for raid arrays with persistent superblocks
++ md=<md device no.>,dev0,dev1,...,devn
++or, to assemble a partitionable array:
++ md=d<md device no.>,dev0,dev1,...,devn
++
++md device no. = the number of the md device ...
++ 0 means md0,
++ 1 md1,
++ 2 md2,
++ 3 md3,
++ 4 md4
++
++raid level = -1 linear mode
++ 0 striped mode
++ other modes are only supported with persistent super blocks
++
++chunk size factor = (raid-0 and raid-1 only)
++ Set the chunk size as 4k << n.
++
++fault level = totally ignored
++
++dev0-devn: e.g. /dev/hda1,/dev/hdc1,/dev/sda1,/dev/sdb1
++
++A possible loadlin line (Harald Hoyer <HarryH@Royal.Net>) looks like this:
++
++e:\loadlin\loadlin e:\zimage root=/dev/md0 md=0,0,4,0,/dev/hdb2,/dev/hdc3 ro
++
++
++Boot time autodetection of RAID arrays
++--------------------------------------
++
++When md is compiled into the kernel (not as module), partitions of
++type 0xfd are scanned and automatically assembled into RAID arrays.
++This autodetection may be suppressed with the kernel parameter
++"raid=noautodetect". As of kernel 2.6.9, only drives with a type 0
++superblock can be autodetected and run at boot time.
++
++The kernel parameter "raid=partitionable" (or "raid=part") means
++that all auto-detected arrays are assembled as partitionable.
++
++Boot time assembly of degraded/dirty arrays
++-------------------------------------------
++
++If a raid5 or raid6 array is both dirty and degraded, it could have
++undetectable data corruption. This is because the fact that it is
++'dirty' means that the parity cannot be trusted, and the fact that it
++is degraded means that some datablocks are missing and cannot reliably
++be reconstructed (due to no parity).
++
++For this reason, md will normally refuse to start such an array. This
++requires the sysadmin to take action to explicitly start the array
++despite possible corruption. This is normally done with
++ mdadm --assemble --force ....
++
++This option is not really available if the array has the root
++filesystem on it. In order to support this booting from such an
++array, md supports a module parameter "start_dirty_degraded" which,
++when set to 1, bypassed the checks and will allows dirty degraded
++arrays to be started.
++
++So, to boot with a root filesystem of a dirty degraded raid[56], use
++
++ md-mod.start_dirty_degraded=1
++
++
++Superblock formats
++------------------
++
++The md driver can support a variety of different superblock formats.
++Currently, it supports superblock formats "0.90.0" and the "md-1" format
++introduced in the 2.5 development series.
++
++The kernel will autodetect which format superblock is being used.
++
++Superblock format '0' is treated differently to others for legacy
++reasons - it is the original superblock format.
++
++
++General Rules - apply for all superblock formats
++------------------------------------------------
++
++An array is 'created' by writing appropriate superblocks to all
++devices.
++
++It is 'assembled' by associating each of these devices with an
++particular md virtual device. Once it is completely assembled, it can
++be accessed.
++
++An array should be created by a user-space tool. This will write
++superblocks to all devices. It will usually mark the array as
++'unclean', or with some devices missing so that the kernel md driver
++can create appropriate redundancy (copying in raid1, parity
++calculation in raid4/5).
++
++When an array is assembled, it is first initialized with the
++SET_ARRAY_INFO ioctl. This contains, in particular, a major and minor
++version number. The major version number selects which superblock
++format is to be used. The minor number might be used to tune handling
++of the format, such as suggesting where on each device to look for the
++superblock.
++
++Then each device is added using the ADD_NEW_DISK ioctl. This
++provides, in particular, a major and minor number identifying the
++device to add.
++
++The array is started with the RUN_ARRAY ioctl.
++
++Once started, new devices can be added. They should have an
++appropriate superblock written to them, and then be passed in with
++ADD_NEW_DISK.
++
++Devices that have failed or are not yet active can be detached from an
++array using HOT_REMOVE_DISK.
++
++
++Specific Rules that apply to format-0 super block arrays, and
++ arrays with no superblock (non-persistent).
++-------------------------------------------------------------
++
++An array can be 'created' by describing the array (level, chunksize
++etc) in a SET_ARRAY_INFO ioctl. This must have major_version==0 and
++raid_disks != 0.
++
++Then uninitialized devices can be added with ADD_NEW_DISK. The
++structure passed to ADD_NEW_DISK must specify the state of the device
++and its role in the array.
++
++Once started with RUN_ARRAY, uninitialized spares can be added with
++HOT_ADD_DISK.
++
++
++
++MD devices in sysfs
++-------------------
++md devices appear in sysfs (/sys) as regular block devices,
++e.g.
++ /sys/block/md0
++
++Each 'md' device will contain a subdirectory called 'md' which
++contains further md-specific information about the device.
++
++All md devices contain:
++ level
++ a text file indicating the 'raid level'. This may be a standard
++ numerical level prefixed by "RAID-" - e.g. "RAID-5", or some
++ other name such as "linear" or "multipath".
++ If no raid level has been set yet (array is still being
++ assembled), this file will be empty.
++
++ raid_disks
++ a text file with a simple number indicating the number of devices
++ in a fully functional array. If this is not yet known, the file
++ will be empty. If an array is being resized (not currently
++ possible) this will contain the larger of the old and new sizes.
++ Some raid level (RAID1) allow this value to be set while the
++ array is active. This will reconfigure the array. Otherwise
++ it can only be set while assembling an array.
++
++ chunk_size
++	This is the size in bytes for 'chunks' and is only relevant to
++ raid levels that involve striping (1,4,5,6,10). The address space
++ of the array is conceptually divided into chunks and consecutive
++ chunks are striped onto neighbouring devices.
++	The size should be at least PAGE_SIZE (4k) and should be a power
++ of 2. This can only be set while assembling an array
++
++ component_size
++ For arrays with data redundancy (i.e. not raid0, linear, faulty,
++ multipath), all components must be the same size - or at least
++	there must be a size that they all provide space for. This is a key
++	part of the geometry of the array. It is measured in sectors
++ and can be read from here. Writing to this value may resize
++ the array if the personality supports it (raid1, raid5, raid6),
++ and if the component drives are large enough.
++
++ metadata_version
++ This indicates the format that is being used to record metadata
++ about the array. It can be 0.90 (traditional format), 1.0, 1.1,
++ 1.2 (newer format in varying locations) or "none" indicating that
++ the kernel isn't managing metadata at all.
++
++ level
++ The raid 'level' for this array. The name will often (but not
++ always) be the same as the name of the module that implements the
++ level. To be auto-loaded the module must have an alias
++ md-$LEVEL e.g. md-raid5
++ This can be written only while the array is being assembled, not
++ after it is started.
++
++ new_dev
++ This file can be written but not read. The value written should
++ be a block device number as major:minor. e.g. 8:0
++ This will cause that device to be attached to the array, if it is
++ available. It will then appear at md/dev-XXX (depending on the
++ name of the device) and further configuration is then possible.
++
++ sync_speed_min
++ sync_speed_max
++	These are similar to /proc/sys/dev/raid/speed_limit_{min,max}
++ however they only apply to the particular array.
++	If no value has been written to these, or if the word 'system'
++ is written, then the system-wide value is used. If a value,
++ in kibibytes-per-second is written, then it is used.
++ When the files are read, they show the currently active value
++ followed by "(local)" or "(system)" depending on whether it is
++ a locally set or system-wide value.
++
++ sync_completed
++ This shows the number of sectors that have been completed of
++ whatever the current sync_action is, followed by the number of
++ sectors in total that could need to be processed. The two
++ numbers are separated by a '/' thus effectively showing one
++ value, a fraction of the process that is complete.
++
++ sync_speed
++ This shows the current actual speed, in K/sec, of the current
++ sync_action. It is averaged over the last 30 seconds.
++
++
++As component devices are added to an md array, they appear in the 'md'
++directory as new directories named
++ dev-XXX
++where XXX is a name that the kernel knows for the device, e.g. hdb1.
++Each directory contains:
++
++ block
++ a symlink to the block device in /sys/block, e.g.
++ /sys/block/md0/md/dev-hdb1/block -> ../../../../block/hdb/hdb1
++
++ super
++ A file containing an image of the superblock read from, or
++ written to, that device.
++
++ state
++ A file recording the current state of the device in the array
++ which can be a comma separated list of
++ faulty - device has been kicked from active use due to
++ a detected fault
++ in_sync - device is a fully in-sync member of the array
++ spare - device is working, but not a full member.
++ This includes spares that are in the process
++	      of being recovered to
++	This list may grow in future.
++
++ errors
++ An approximate count of read errors that have been detected on
++ this device but have not caused the device to be evicted from
++ the array (either because they were corrected or because they
++ happened while the array was read-only). When using version-1
++ metadata, this value persists across restarts of the array.
++
++ This value can be written while assembling an array thus
++ providing an ongoing count for arrays with metadata managed by
++ userspace.
++
++ slot
++ This gives the role that the device has in the array. It will
++ either be 'none' if the device is not active in the array
++ (i.e. is a spare or has failed) or an integer less than the
++ 'raid_disks' number for the array indicating which position
++ it currently fills. This can only be set while assembling an
++ array. A device for which this is set is assumed to be working.
++
++ offset
++ This gives the location in the device (in sectors from the
++ start) where data from the array will be stored. Any part of
++ the device before this offset is not touched, unless it is
++ used for storing metadata (Formats 1.1 and 1.2).
++
++ size
++ The amount of the device, after the offset, that can be used
++ for storage of data. This will normally be the same as the
++ component_size. This can be written while assembling an
++ array. If a value less than the current component_size is
++ written, component_size will be reduced to this value.
++
++
++An active md device will also contain an entry for each active device
++in the array. These are named
++
++ rdNN
++
++where 'NN' is the position in the array, starting from 0.
++So for a 3 drive array there will be rd0, rd1, rd2.
++These are symbolic links to the appropriate 'dev-XXX' entry.
++Thus, for example,
++ cat /sys/block/md*/md/rd*/state
++will show 'in_sync' on every line.
++
++
++
++Active md devices for levels that support data redundancy (1,4,5,6)
++also have
++
++ sync_action
++ a text file that can be used to monitor and control the rebuild
++ process. It contains one word which can be one of:
++ resync - redundancy is being recalculated after unclean
++ shutdown or creation
++ recover - a hot spare is being built to replace a
++ failed/missing device
++ idle - nothing is happening
++ check - A full check of redundancy was requested and is
++ happening. This reads all blocks and checks
++ them. A repair may also happen for some raid
++ levels.
++ repair - A full check and repair is happening. This is
++ similar to 'resync', but was requested by the
++ user, and the write-intent bitmap is NOT used to
++ optimise the process.
++
++ This file is writable, and each of the strings that could be
++ read are meaningful for writing.
++
++ 'idle' will stop an active resync/recovery etc. There is no
++ guarantee that another resync/recovery may not be automatically
++ started again, though some event will be needed to trigger
++ this.
++ 'resync' or 'recover' can be used to restart the
++ corresponding operation if it was stopped with 'idle'.
++ 'check' and 'repair' will start the appropriate process
++ providing the current state is 'idle'.
++
++ mismatch_cnt
++ When performing 'check' and 'repair', and possibly when
++ performing 'resync', md will count the number of errors that are
++ found. The count in 'mismatch_cnt' is the number of sectors
++ that were re-written, or (for 'check') would have been
++ re-written. As most raid levels work in units of pages rather
++ than sectors, this may be larger than the number of actual errors
++ by a factor of the number of sectors in a page.
++
++Each active md device may also have attributes specific to the
++personality module that manages it.
++These are specific to the implementation of the module and could
++change substantially if the implementation changes.
++
++These currently include
++
++ stripe_cache_size (currently raid5 only)
++ number of entries in the stripe cache. This is writable, but
++ there are upper and lower limits (32768, 16). Default is 128.
++ stripe_cache_active (currently raid5 only)
++ number of active entries in the stripe cache
diff --git a/debian/patches/Makefile b/debian/patches/Makefile
new file mode 100644
index 00000000..18a3802b
--- /dev/null
+++ b/debian/patches/Makefile
@@ -0,0 +1,3 @@
+.PHONY: 00list
+00list:
+ find . -name Makefile -prune -o -name 00list -prune -o -type f -print | sed -e 's,\./\(.*\)\.dpatch,\1,' | sort -n > $@
diff --git a/debian/po/POTFILES.in b/debian/po/POTFILES.in
new file mode 100644
index 00000000..04922385
--- /dev/null
+++ b/debian/po/POTFILES.in
@@ -0,0 +1 @@
+[type: gettext/rfc822deb] mdadm.templates
diff --git a/debian/po/cs.po b/debian/po/cs.po
new file mode 100644
index 00000000..0996c70d
--- /dev/null
+++ b/debian/po/cs.po
@@ -0,0 +1,213 @@
+#
+# Translators, if you are not familiar with the PO format, gettext
+# documentation is worth reading, especially sections dedicated to
+# this format, e.g. by running:
+# info -n '(gettext)PO Files'
+# info -n '(gettext)Header Entry'
+#
+# Some information specific to po-debconf are available at
+# /usr/share/doc/po-debconf/README-trans
+# or http://www.debian.org/intl/l10n/po-debconf/README-trans
+#
+# Developers do not need to manually edit POT or PO files.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: mdadm\n"
+"Report-Msgid-Bugs-To: pkg-mdadm-devel@lists.alioth.debian.org\n"
+"POT-Creation-Date: 2006-07-07 17:30+0200\n"
+"PO-Revision-Date: 2006-07-01 09:24+0200\n"
+"Last-Translator: Miroslav Kure <kurem@debian.cz>\n"
+"Language-Team: Czech <debian-l10n-czech@lists.debian.org>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1001
+msgid "Initialise the superblock if you reuse hard disks"
+msgstr "Při znovupoužití starších disků inicializujte superblok"
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1002
+msgid ""
+"WARNING! If you are using hard disks which have RAID superblocks from "
+"earlier installations in different RAID arrays, you MUST zero each "
+"superblock *before* activating the autostart feature."
+msgstr ""
+"VAROVÁNÍ! Používáte-li pevné disky, které obsahují RAID superbloky z "
+"dřívější instalace v jiném RAID poli, MUSÍTE všechny superbloky před "
+"použitím automatického spouštění vynulovat."
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1003
+msgid ""
+"To do this, do not start the RAID devices automatically. First, zero the "
+"superblock (mdadm --zero-superblock /dev/mdX). Next, use `dpkg-reconfigure "
+"mdadm` to reactivate the autostart feature."
+msgstr ""
+"Chcete-li to provést, nespouštějte RAID zařízení automaticky. Nejprve "
+"vynulujte superblok příkazem 'mdadm --zero-superblock /dev/mdX' a teprve "
+"poté můžete povolit automatické spouštění RAIDu příkazem 'dpkg-reconfigure "
+"mdadm'."
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1004
+msgid ""
+"If you manage your RAIDs otherwise (e.g. EVMS), either disable autostart, or "
+"ensure that /etc/mdadm/mdadm.conf only lists those arrays you want to start "
+"by mdadm."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2001
+msgid "RAID arrays needed for the root filesystem:"
+msgstr "RAID pole vyžadovaná pro kořenový souborový systém:"
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2002
+msgid ""
+"If your system has its root filesystem on a RAID volume, it needs to be "
+"started early during the boot sequence. If your root filesystem is on a "
+"logical volume (LVM), which is on RAID, all constituent arrays need to be "
+"started."
+msgstr ""
+"Pokud má váš systém svůj kořenový souborový systém umístěn na RAID svazku, "
+"musí být tento spuštěn během zavádění systému co nejdříve. Pokud se váš "
+"kořenový souborový systém nachází na logickém svazku LVM, který je vytvořen "
+"nad RAIDem, musí se spustit všechna související pole."
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2003
+msgid ""
+"If you know exactly which arrays are needed to bring up the root filesystem, "
+"enter them here. Alternatively, enter 'all' to simply start all available "
+"arrays. If you do not need or want to start any arrays for the root "
+"filesystem, leave the answer blank (or enter 'none')."
+msgstr ""
+"Jestliže přesně víte, která pole jsou potřeba pro připojení kořenového "
+"souborového systému, zadejte je prosím zde. Alternativně můžete spustit "
+"všechna dostupná pole zadáním 'all'. Nepotřebujete-li nebo nechcete-li "
+"spouštět pole pro kořenový souborový systém, ponechte odpověď prázdnou, "
+"případně zadejte 'none'."
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2004
+msgid ""
+"You have the option to start all other arrays (those not needed for the root "
+"filesystem) later in the boot sequence. Doing so will give you greater "
+"control over the arrays with the mdadm configuration file. Starting all "
+"arrays at boot-time may be safer though."
+msgstr ""
+"Všechna ostatní pole (ta, která nejsou potřeba pro kořenový souborový "
+"systém) můžete spustit později. Pokud tak učiníte, budete mít v "
+"konfiguračním souboru mdadm nad poli větší kontrolu. Na druhou stranu je "
+"spouštění všech polí hned na začátku zavádění o něco jistější volbou."
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2005
+msgid ""
+"Please enter a space-separated list of devices, or 'all'. You may omit the "
+"leading '/dev/' and just enter e.g. \"md0 md1\"."
+msgstr ""
+"Zařízení zadejte jako mezerami oddělený seznam, případně 'all'. Počáteční '/"
+"dev/' můžete vynechat a zadat jen např. \"md0 md1\"."
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:3001
+msgid "Do you want to start RAID devices automatically?"
+msgstr "Chcete spouštět RAID zařízení automaticky?"
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:3002
+#, fuzzy
+msgid ""
+"Once the base system has come up, mdadm can start all RAID devices specified "
+"in /etc/mdadm/mdadm.conf, which have not yet been started. Unless you have "
+"compiled RAID support into the kernel (in which case all RAID arrays with "
+"partitions of type 0xfd (and only those) are started automatically anyway), "
+"this is probably what you want."
+msgstr ""
+"Po zavedení základního systému může mdadm spustit všechna dosud nespuštěná "
+"RAID zařízení. Pokud nemáte zakompilovanou podporu RAIDu přímo v jádře (kdy "
+"se všechna RAID pole spustí zcela automaticky), pravděpodobně budete chtít "
+"tuto možnost povolit."
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4001
+msgid "Should mdadm run monthly parity checks of the RAID arrays?"
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4002
+msgid ""
+"If your kernel supports it (>> 2.6.14), mdadm can periodically check the "
+"parity of your RAID devices. This may be a resource-intensive process, "
+"depending on your setup, but it could help prevent rare cases of data loss."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4003
+msgid ""
+"The default, if turned on, is to run the checks on the first Sunday of every "
+"month at 01:06 o'clock."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:5001
+msgid "Do you want to start the RAID monitoring daemon?"
+msgstr "Chcete spustit daemon pro monitorování RAIDu?"
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:5002
+msgid ""
+"The RAID monitor daemon sends email notifications in response to important "
+"RAID events (such as a disk failure). You probably want to enable it."
+msgstr ""
+"Daemon pro monitorování RAIDu zasílá emailová upozornění na významné RAID "
+"události, např. selhání disku. Je rozumné tuto možnost povolit."
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:6001
+msgid "Recipient for email notifications:"
+msgstr "Příjemce emailových upozornění:"
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:6002
+msgid ""
+"Please enter the email address of the user who should get the email "
+"notification for these important RAID events."
+msgstr ""
+"Zadejte emailovou adresu uživatele, který má dostávat emailová upozornění "
+"při výskytu významných RAID událostí."
+
+#~ msgid ""
+#~ "If RAID devices are started automatically, all RAID devices are "
+#~ "discovered and assembled automatically at system startup. This option "
+#~ "should only be used if the md driver is compiled as a module. If it is "
+#~ "compiled into your kernel, the automatic startup will be performed at "
+#~ "boot time by the kernel and therefore you should not choose this option."
+#~ msgstr ""
+#~ "Jestliže jsou RAID zařízení spouštěna automaticky, jsou všechna RAID "
+#~ "zařízení rozpoznána a poskládána automaticky při zavádění systému. Tuto "
+#~ "volbu byste měli použít pouze v případě, že jste ovladač md zakompilovali "
+#~ "jako modul. Pokud jste jej zakompilovali přímo do jádra, o automatické "
+#~ "spuštění se postará samotné jádro a tedy tuto možnost nepotřebujete."
diff --git a/debian/po/de.po b/debian/po/de.po
new file mode 100644
index 00000000..e9541cc2
--- /dev/null
+++ b/debian/po/de.po
@@ -0,0 +1,207 @@
+#
+# Translators, if you are not familiar with the PO format, gettext
+# documentation is worth reading, especially sections dedicated to
+# this format, e.g. by running:
+# info -n '(gettext)PO Files'
+# info -n '(gettext)Header Entry'
+#
+# Some information specific to po-debconf are available at
+# /usr/share/doc/po-debconf/README-trans
+# or http://www.debian.org/intl/l10n/po-debconf/README-trans
+#
+# Developers do not need to manually edit POT or PO files.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: mdadm\n"
+"Report-Msgid-Bugs-To: pkg-mdadm-devel@lists.alioth.debian.org\n"
+"POT-Creation-Date: 2006-07-07 17:30+0200\n"
+"PO-Revision-Date: 2004-07-20 23:55+0200\n"
+"Last-Translator: Mario Joussen <joussen@debian.org>\n"
+"Language-Team: German <debian-l10n-german@lists.debian.org>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=ISO-8859-15\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1001
+msgid "Initialise the superblock if you reuse hard disks"
+msgstr ""
+"Initialisieren Sie den Superblock, wenn Sie Festplatten wieder verwenden."
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1002
+#, fuzzy
+msgid ""
+"WARNING! If you are using hard disks which have RAID superblocks from "
+"earlier installations in different RAID arrays, you MUST zero each "
+"superblock *before* activating the autostart feature."
+msgstr ""
+"WARNUNG! Wenn Sie Festplatten verwenden, die bereits einen md Superblock von "
+"einer vorherigen Installation in einem anderen RAID Verbund besitzen, so M?"
+"SSEN Sie diesen l?schen, bevor Sie die Autostart Funktion aktivieren."
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1003
+#, fuzzy
+msgid ""
+"To do this, do not start the RAID devices automatically. First, zero the "
+"superblock (mdadm --zero-superblock /dev/mdX). Next, use `dpkg-reconfigure "
+"mdadm` to reactivate the autostart feature."
+msgstr ""
+"Dazu starten Sie die RAID Laufwerke nicht automatisch und l?schen dann den "
+"Superblock (mdadm --zero-superblock /dev/xxx). Danach k?nnen Sie mit \"dpkg-"
+"reconfigure mdadm\" die Autostart Funktion aktivieren."
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1004
+msgid ""
+"If you manage your RAIDs otherwise (e.g. EVMS), either disable autostart, or "
+"ensure that /etc/mdadm/mdadm.conf only lists those arrays you want to start "
+"by mdadm."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2001
+msgid "RAID arrays needed for the root filesystem:"
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2002
+msgid ""
+"If your system has its root filesystem on a RAID volume, it needs to be "
+"started early during the boot sequence. If your root filesystem is on a "
+"logical volume (LVM), which is on RAID, all constituent arrays need to be "
+"started."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2003
+msgid ""
+"If you know exactly which arrays are needed to bring up the root filesystem, "
+"enter them here. Alternatively, enter 'all' to simply start all available "
+"arrays. If you do not need or want to start any arrays for the root "
+"filesystem, leave the answer blank (or enter 'none')."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2004
+msgid ""
+"You have the option to start all other arrays (those not needed for the root "
+"filesystem) later in the boot sequence. Doing so will give you greater "
+"control over the arrays with the mdadm configuration file. Starting all "
+"arrays at boot-time may be safer though."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2005
+msgid ""
+"Please enter a space-separated list of devices, or 'all'. You may omit the "
+"leading '/dev/' and just enter e.g. \"md0 md1\"."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:3001
+#, fuzzy
+msgid "Do you want to start RAID devices automatically?"
+msgstr "M?chten Sie die RAID Laufwerke automatisch starten?"
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:3002
+msgid ""
+"Once the base system has come up, mdadm can start all RAID devices specified "
+"in /etc/mdadm/mdadm.conf, which have not yet been started. Unless you have "
+"compiled RAID support into the kernel (in which case all RAID arrays with "
+"partitions of type 0xfd (and only those) are started automatically anyway), "
+"this is probably what you want."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4001
+msgid "Should mdadm run monthly parity checks of the RAID arrays?"
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4002
+msgid ""
+"If your kernel supports it (>> 2.6.14), mdadm can periodically check the "
+"parity of your RAID devices. This may be a resource-intensive process, "
+"depending on your setup, but it could help prevent rare cases of data loss."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4003
+msgid ""
+"The default, if turned on, is to run the checks on the first Sunday of every "
+"month at 01:06 o'clock."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:5001
+#, fuzzy
+msgid "Do you want to start the RAID monitoring daemon?"
+msgstr "M?chten Sie den RAID-?berwachungsd?mon starten?"
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:5002
+msgid ""
+"The RAID monitor daemon sends email notifications in response to important "
+"RAID events (such as a disk failure). You probably want to enable it."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:6001
+#, fuzzy
+msgid "Recipient for email notifications:"
+msgstr "Empf?nger der Email-Benachrichtungen:"
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:6002
+#, fuzzy
+msgid ""
+"Please enter the email address of the user who should get the email "
+"notification for these important RAID events."
+msgstr ""
+"Geben Sie bitte die Emailadresse des Benutzers an, der im Falle einer Status?"
+"nderung des RAIDs die Email-Benachrichtung erhalten soll."
+
+#~ msgid ""
+#~ "If RAID devices are started automatically, all RAID devices are "
+#~ "discovered and assembled automatically at system startup. This option "
+#~ "should only be used if the md driver is compiled as a module. If it is "
+#~ "compiled into your kernel, the automatic startup will be performed at "
+#~ "boot time by the kernel and therefore you should not choose this option."
+#~ msgstr ""
+#~ "Wenn die RAID Laufwerke automatisch gestartet werden, werden alle RAID "
+#~ "Laufwerke beim Systemstart automatisch gefunden und gestartet. Diese "
+#~ "Option sollte nur benutzt werden, falls der md Treiber als Modul "
+#~ "kompiliert wurde. Falls er in den Kernel einkompiliert wurde, f?hrt der "
+#~ "Kernel den automatischen Start beim Booten durch und Sie sollten diese "
+#~ "Option deshalb nicht ausw?hlen."
+
+#~ msgid ""
+#~ "When the RAID monitor daemon runs, email notifications are sent when a "
+#~ "disk belonging to a RAID array fails or changes its status for some "
+#~ "reason."
+#~ msgstr ""
+#~ "Wird der RAID-?berwachungsd?mon gestartet, so werden Email-"
+#~ "Benachrichtigungen verschickt, falls ein zum RAID geh?rendes Laufwerk "
+#~ "ausf?llt oder den Status ?ndert."
diff --git a/debian/po/fr.po b/debian/po/fr.po
new file mode 100644
index 00000000..def437cb
--- /dev/null
+++ b/debian/po/fr.po
@@ -0,0 +1,205 @@
+#
+# Translators, if you are not familiar with the PO format, gettext
+# documentation is worth reading, especially sections dedicated to
+# this format, e.g. by running:
+# info -n '(gettext)PO Files'
+# info -n '(gettext)Header Entry'
+#
+# Some information specific to po-debconf are available at
+# /usr/share/doc/po-debconf/README-trans
+# or http://www.debian.org/intl/l10n/po-debconf/README-trans
+#
+# Developers do not need to manually edit POT or PO files.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: mdadm\n"
+"Report-Msgid-Bugs-To: pkg-mdadm-devel@lists.alioth.debian.org\n"
+"POT-Creation-Date: 2006-07-07 17:30+0200\n"
+"PO-Revision-Date: 2005-08-13 12:30+0200\n"
+"Last-Translator: Eric Madesclair<eric-m@wanadoo.fr>\n"
+"Language-Team: French <debian-l10n-french@lists.debian.org>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=ISO-8859-1\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1001
+msgid "Initialise the superblock if you reuse hard disks"
+msgstr "Si vous rutilisez des disques durs, initialisez leur superbloc."
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1002
+#, fuzzy
+msgid ""
+"WARNING! If you are using hard disks which have RAID superblocks from "
+"earlier installations in different RAID arrays, you MUST zero each "
+"superblock *before* activating the autostart feature."
+msgstr ""
+"ATTENTION: Si vous utilisez des disques durs dont le superbloc a dj t "
+"crit par md lors d'une installation dans une autre matrice RAID, vous DEVEZ "
+"effacer leur superbloc avant d'activer le dmarrage automatique."
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1003
+msgid ""
+"To do this, do not start the RAID devices automatically. First, zero the "
+"superblock (mdadm --zero-superblock /dev/mdX). Next, use `dpkg-reconfigure "
+"mdadm` to reactivate the autostart feature."
+msgstr ""
+"Pour faire cela, vous ne devez pas dmarrer automatiquement les "
+"priphriques RAID. Commencez par effacer le superbloc (mdadm --zero-"
+"superblock /dev/xxx) puis activez le dmarrage automatique en utilisant "
+"dpkg-reconfigure mdadm."
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1004
+msgid ""
+"If you manage your RAIDs otherwise (e.g. EVMS), either disable autostart, or "
+"ensure that /etc/mdadm/mdadm.conf only lists those arrays you want to start "
+"by mdadm."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2001
+msgid "RAID arrays needed for the root filesystem:"
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2002
+msgid ""
+"If your system has its root filesystem on a RAID volume, it needs to be "
+"started early during the boot sequence. If your root filesystem is on a "
+"logical volume (LVM), which is on RAID, all constituent arrays need to be "
+"started."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2003
+msgid ""
+"If you know exactly which arrays are needed to bring up the root filesystem, "
+"enter them here. Alternatively, enter 'all' to simply start all available "
+"arrays. If you do not need or want to start any arrays for the root "
+"filesystem, leave the answer blank (or enter 'none')."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2004
+msgid ""
+"You have the option to start all other arrays (those not needed for the root "
+"filesystem) later in the boot sequence. Doing so will give you greater "
+"control over the arrays with the mdadm configuration file. Starting all "
+"arrays at boot-time may be safer though."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2005
+msgid ""
+"Please enter a space-separated list of devices, or 'all'. You may omit the "
+"leading '/dev/' and just enter e.g. \"md0 md1\"."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:3001
+#, fuzzy
+msgid "Do you want to start RAID devices automatically?"
+msgstr "Voulez-vous dmarrer automatiquement les priphriques RAID?"
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:3002
+msgid ""
+"Once the base system has come up, mdadm can start all RAID devices specified "
+"in /etc/mdadm/mdadm.conf, which have not yet been started. Unless you have "
+"compiled RAID support into the kernel (in which case all RAID arrays with "
+"partitions of type 0xfd (and only those) are started automatically anyway), "
+"this is probably what you want."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4001
+msgid "Should mdadm run monthly parity checks of the RAID arrays?"
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4002
+msgid ""
+"If your kernel supports it (>> 2.6.14), mdadm can periodically check the "
+"parity of your RAID devices. This may be a resource-intensive process, "
+"depending on your setup, but it could help prevent rare cases of data loss."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4003
+msgid ""
+"The default, if turned on, is to run the checks on the first Sunday of every "
+"month at 01:06 o'clock."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:5001
+#, fuzzy
+msgid "Do you want to start the RAID monitoring daemon?"
+msgstr "Voulez-vous dmarrer le dmon de surveillance RAID?"
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:5002
+msgid ""
+"The RAID monitor daemon sends email notifications in response to important "
+"RAID events (such as a disk failure). You probably want to enable it."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:6001
+#, fuzzy
+msgid "Recipient for email notifications:"
+msgstr "Destinataire des notifications par courriel:"
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:6002
+#, fuzzy
+msgid ""
+"Please enter the email address of the user who should get the email "
+"notification for these important RAID events."
+msgstr ""
+"Veuillez indiquer l'adresse lectronique de l'utilisateur qui doit recevoir "
+"les notifications si l'tat des priphriques RAID change."
+
+#~ msgid ""
+#~ "If RAID devices are started automatically, all RAID devices are "
+#~ "discovered and assembled automatically at system startup. This option "
+#~ "should only be used if the md driver is compiled as a module. If it is "
+#~ "compiled into your kernel, the automatic startup will be performed at "
+#~ "boot time by the kernel and therefore you should not choose this option."
+#~ msgstr ""
+#~ "Si les priphriques RAID sont dmarrs automatiquement, ceux-ci peuvent "
+#~ "tre recherchs et runis automatiquement au dmarrage du systme. Cela "
+#~ "n'est possible que si vous utilisez le pilote md en module. S'il est "
+#~ "compil dans le noyau, celui-ci dmarre les priphriques automatiquement "
+#~ "et vous n'avez donc pas besoin de slectionner cette option."
+
+#~ msgid ""
+#~ "When the RAID monitor daemon runs, email notifications are sent when a "
+#~ "disk belonging to a RAID array fails or changes its status for some "
+#~ "reason."
+#~ msgstr ""
+#~ "Quand le dmon de surveillance des priphriques RAID est utilis, une "
+#~ "notification par courriel est envoye en cas de dfaillance ou de "
+#~ "changement d'tat d'un disque appartenant un ensemble RAID."
diff --git a/debian/po/ja.po b/debian/po/ja.po
new file mode 100644
index 00000000..78d33330
--- /dev/null
+++ b/debian/po/ja.po
@@ -0,0 +1,205 @@
+#
+# Translators, if you are not familiar with the PO format, gettext
+# documentation is worth reading, especially sections dedicated to
+# this format, e.g. by running:
+# info -n '(gettext)PO Files'
+# info -n '(gettext)Header Entry'
+#
+# Some information specific to po-debconf are available at
+# /usr/share/doc/po-debconf/README-trans
+# or http://www.debian.org/intl/l10n/po-debconf/README-trans
+#
+# Developers do not need to manually edit POT or PO files.
+#
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: mdadm 1.7.0-2\n"
+"Report-Msgid-Bugs-To: pkg-mdadm-devel@lists.alioth.debian.org\n"
+"POT-Creation-Date: 2006-07-07 17:30+0200\n"
+"PO-Revision-Date: 2004-10-31 00:32+0900\n"
+"Last-Translator: Hideki Yamane <henrich@samba.gr.jp>\n"
+"Language-Team: Japanese <debian-japanese@lists.debian.org>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=EUC-JP\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1001
+msgid "Initialise the superblock if you reuse hard disks"
+msgstr "ϡɥǥѤ superblock Ƥ"
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1002
+#, fuzzy
+msgid ""
+"WARNING! If you are using hard disks which have RAID superblocks from "
+"earlier installations in different RAID arrays, you MUST zero each "
+"superblock *before* activating the autostart feature."
+msgstr ""
+"ٹ! Υ󥹥ȡˤäưۤʤ RAID 쥤Ǥ md superblock ݻ"
+"ƤϡɥǥȤäƤ硢ưưǽͭˤ "
+"superblock 򥼥Ǿ񤭤뤳ȤɬספǤ"
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1003
+#, fuzzy
+msgid ""
+"To do this, do not start the RAID devices automatically. First, zero the "
+"superblock (mdadm --zero-superblock /dev/mdX). Next, use `dpkg-reconfigure "
+"mdadm` to reactivate the autostart feature."
+msgstr ""
+"ԤˤϡRAID ǥХưŪ˵ư superblock 򥼥Ǿ񤭤"
+"ޤ (mdadm --zero-superblock /dev/xxx) ơưưǽͭˤ뤿"
+"ˤ 'dpkg-reconfigure mdadm' ޥɤѤǤޤ"
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1004
+msgid ""
+"If you manage your RAIDs otherwise (e.g. EVMS), either disable autostart, or "
+"ensure that /etc/mdadm/mdadm.conf only lists those arrays you want to start "
+"by mdadm."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2001
+msgid "RAID arrays needed for the root filesystem:"
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2002
+msgid ""
+"If your system has its root filesystem on a RAID volume, it needs to be "
+"started early during the boot sequence. If your root filesystem is on a "
+"logical volume (LVM), which is on RAID, all constituent arrays need to be "
+"started."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2003
+msgid ""
+"If you know exactly which arrays are needed to bring up the root filesystem, "
+"enter them here. Alternatively, enter 'all' to simply start all available "
+"arrays. If you do not need or want to start any arrays for the root "
+"filesystem, leave the answer blank (or enter 'none')."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2004
+msgid ""
+"You have the option to start all other arrays (those not needed for the root "
+"filesystem) later in the boot sequence. Doing so will give you greater "
+"control over the arrays with the mdadm configuration file. Starting all "
+"arrays at boot-time may be safer though."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2005
+msgid ""
+"Please enter a space-separated list of devices, or 'all'. You may omit the "
+"leading '/dev/' and just enter e.g. \"md0 md1\"."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:3001
+#, fuzzy
+msgid "Do you want to start RAID devices automatically?"
+msgstr "RAID ǥХưŪ˵ưޤ?"
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:3002
+msgid ""
+"Once the base system has come up, mdadm can start all RAID devices specified "
+"in /etc/mdadm/mdadm.conf, which have not yet been started. Unless you have "
+"compiled RAID support into the kernel (in which case all RAID arrays with "
+"partitions of type 0xfd (and only those) are started automatically anyway), "
+"this is probably what you want."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4001
+msgid "Should mdadm run monthly parity checks of the RAID arrays?"
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4002
+msgid ""
+"If your kernel supports it (>> 2.6.14), mdadm can periodically check the "
+"parity of your RAID devices. This may be a resource-intensive process, "
+"depending on your setup, but it could help prevent rare cases of data loss."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4003
+msgid ""
+"The default, if turned on, is to run the checks on the first Sunday of every "
+"month at 01:06 o'clock."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:5001
+#, fuzzy
+msgid "Do you want to start the RAID monitoring daemon?"
+msgstr "RAID ƻǡưޤ?"
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:5002
+msgid ""
+"The RAID monitor daemon sends email notifications in response to important "
+"RAID events (such as a disk failure). You probably want to enable it."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:6001
+#, fuzzy
+msgid "Recipient for email notifications:"
+msgstr "ǡ󤫤Τ밸:"
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:6002
+#, fuzzy
+msgid ""
+"Please enter the email address of the user who should get the email "
+"notification for these important RAID events."
+msgstr ""
+"RAID 쥤ΥơѲ˥᡼Τ桼Υ᡼륢"
+"쥹ϤƤ"
+
+#~ msgid ""
+#~ "If RAID devices are started automatically, all RAID devices are "
+#~ "discovered and assembled automatically at system startup. This option "
+#~ "should only be used if the md driver is compiled as a module. If it is "
+#~ "compiled into your kernel, the automatic startup will be performed at "
+#~ "boot time by the kernel and therefore you should not choose this option."
+#~ msgstr ""
+#~ "RAID ǥХưŪ˵ư褦ˤȡƥ൯ưƤ RAID "
+#~ "ǥХФ졢ưŪ˹ޤΥץ md ɥ饤Ф"
+#~ "塼Ȥƥѥ뤵ƤΤߤѤޤͥȤ߹"
+#~ "ǥѥ뤷Ƥ硢ƥ൯ư˥ͥˤäƼưư¹Ԥ"
+#~ "ΤǡΥץǤϤǤޤ"
+
+#~ msgid ""
+#~ "When the RAID monitor daemon runs, email notifications are sent when a "
+#~ "disk belonging to a RAID array fails or changes its status for some "
+#~ "reason."
+#~ msgstr ""
+#~ "RAID ƻǡưƤ硢RAID 쥤°Ƥǥξ"
+#~ "뤫餫ͳѲݤ˥᡼Τޤ"
diff --git a/debian/po/nl.po b/debian/po/nl.po
new file mode 100644
index 00000000..10512196
--- /dev/null
+++ b/debian/po/nl.po
@@ -0,0 +1,208 @@
+# translation of mdadm_nl.po to Dutch
+#
+# Translators, if you are not familiar with the PO format, gettext
+# documentation is worth reading, especially sections dedicated to
+# this format, e.g. by running:
+# info -n '(gettext)PO Files'
+# info -n '(gettext)Header Entry'
+# Some information specific to po-debconf are available at
+# /usr/share/doc/po-debconf/README-trans
+# or http://www.debian.org/intl/l10n/po-debconf/README-trans
+# Developers do not need to manually edit POT or PO files.
+# Frans Pop <aragorn@tiscali.nl>, 2005.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: mdadm_nl\n"
+"Report-Msgid-Bugs-To: pkg-mdadm-devel@lists.alioth.debian.org\n"
+"POT-Creation-Date: 2006-07-07 17:30+0200\n"
+"PO-Revision-Date: 2005-09-19 19:00+0200\n"
+"Last-Translator: Frans Pop <aragorn@tiscali.nl>\n"
+"Language-Team: Dutch <debian-l10n-dutch@lists.debian.org>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"X-Generator: KBabel 1.9.1\n"
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1001
+msgid "Initialise the superblock if you reuse hard disks"
+msgstr "Superblok initialiseren als u harde schijven hergebruikt"
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1002
+#, fuzzy
+msgid ""
+"WARNING! If you are using hard disks which have RAID superblocks from "
+"earlier installations in different RAID arrays, you MUST zero each "
+"superblock *before* activating the autostart feature."
+msgstr ""
+"Waarschuwing! Als u harde schijven gebruikt die een md-superblok bevatten "
+"van een eerdere installatie in een andere RAID-reeks, dan MOET u het "
+"superblok overschrijven met nullen voordat u de \"autostart\"-optie "
+"activeert."
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1003
+msgid ""
+"To do this, do not start the RAID devices automatically. First, zero the "
+"superblock (mdadm --zero-superblock /dev/mdX). Next, use `dpkg-reconfigure "
+"mdadm` to reactivate the autostart feature."
+msgstr ""
+"U kunt dit doen door RAID-apparaten nu niet automatisch te laten starten. "
+"Overschrijf eerst het superblok met nullen ('mdadm --zero-superblock /dev/"
+"mdX'). Gebruik daarna 'dpkg-reconfigure mdadm' om de \"autostart\"-optie te "
+"activeren."
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1004
+msgid ""
+"If you manage your RAIDs otherwise (e.g. EVMS), either disable autostart, or "
+"ensure that /etc/mdadm/mdadm.conf only lists those arrays you want to start "
+"by mdadm."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2001
+msgid "RAID arrays needed for the root filesystem:"
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2002
+msgid ""
+"If your system has its root filesystem on a RAID volume, it needs to be "
+"started early during the boot sequence. If your root filesystem is on a "
+"logical volume (LVM), which is on RAID, all constituent arrays need to be "
+"started."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2003
+msgid ""
+"If you know exactly which arrays are needed to bring up the root filesystem, "
+"enter them here. Alternatively, enter 'all' to simply start all available "
+"arrays. If you do not need or want to start any arrays for the root "
+"filesystem, leave the answer blank (or enter 'none')."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2004
+msgid ""
+"You have the option to start all other arrays (those not needed for the root "
+"filesystem) later in the boot sequence. Doing so will give you greater "
+"control over the arrays with the mdadm configuration file. Starting all "
+"arrays at boot-time may be safer though."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2005
+msgid ""
+"Please enter a space-separated list of devices, or 'all'. You may omit the "
+"leading '/dev/' and just enter e.g. \"md0 md1\"."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:3001
+#, fuzzy
+msgid "Do you want to start RAID devices automatically?"
+msgstr "Wilt u de RAID-apparaten automatisch starten?"
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:3002
+msgid ""
+"Once the base system has come up, mdadm can start all RAID devices specified "
+"in /etc/mdadm/mdadm.conf, which have not yet been started. Unless you have "
+"compiled RAID support into the kernel (in which case all RAID arrays with "
+"partitions of type 0xfd (and only those) are started automatically anyway), "
+"this is probably what you want."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4001
+msgid "Should mdadm run monthly parity checks of the RAID arrays?"
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4002
+msgid ""
+"If your kernel supports it (>> 2.6.14), mdadm can periodically check the "
+"parity of your RAID devices. This may be a resource-intensive process, "
+"depending on your setup, but it could help prevent rare cases of data loss."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4003
+msgid ""
+"The default, if turned on, is to run the checks on the first Sunday of every "
+"month at 01:06 o'clock."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:5001
+#, fuzzy
+msgid "Do you want to start the RAID monitoring daemon?"
+msgstr "Wilt u de achtergronddienst voor de RAID-monitor starten?"
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:5002
+msgid ""
+"The RAID monitor daemon sends email notifications in response to important "
+"RAID events (such as a disk failure). You probably want to enable it."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:6001
+#, fuzzy
+msgid "Recipient for email notifications:"
+msgstr "Adres voor mailberichten van achtergronddienst:"
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:6002
+#, fuzzy
+msgid ""
+"Please enter the email address of the user who should get the email "
+"notification for these important RAID events."
+msgstr ""
+"Geef het mailadres in van de gebruiker die een mailbericht dient te "
+"ontvangen als de status van een RAID-reeks wijzigt."
+
+#~ msgid ""
+#~ "If RAID devices are started automatically, all RAID devices are "
+#~ "discovered and assembled automatically at system startup. This option "
+#~ "should only be used if the md driver is compiled as a module. If it is "
+#~ "compiled into your kernel, the automatic startup will be performed at "
+#~ "boot time by the kernel and therefore you should not choose this option."
+#~ msgstr ""
+#~ "Als RAID-apparaten automatisch worden gestart, worden alle RAID-apparaten "
+#~ "tijdens het opstarten van het systeem automatisch gedetecteerd en "
+#~ "samengesteld. Deze optie dient alleen te worden gebruikt als het md-"
+#~ "stuurprogramma als module is gecompileerd. Als het stuurprogramma in de "
+#~ "kernel is gecompileerd, wordt de automatische detectie verzorgd door de "
+#~ "kernel en dient deze optie dus niet te worden geselecteerd."
+
+#~ msgid ""
+#~ "When the RAID monitor daemon runs, email notifications are sent when a "
+#~ "disk belonging to a RAID array fails or changes its status for some "
+#~ "reason."
+#~ msgstr ""
+#~ "Als de achtergronddienst voor de RAID-monitor actief is, worden "
+#~ "mailberichten gestuurd als een harde schijf die deel uitmaakt van een "
+#~ "RAID-reeks het begeeft of de status ervan wijzigt."
diff --git a/debian/po/pt_BR.po b/debian/po/pt_BR.po
new file mode 100644
index 00000000..35a74b25
--- /dev/null
+++ b/debian/po/pt_BR.po
@@ -0,0 +1,232 @@
+#
+# Translators, if you are not familiar with the PO format, gettext
+# documentation is worth reading, especially sections dedicated to
+# this format, e.g. by running:
+# info -n '(gettext)PO Files'
+# info -n '(gettext)Header Entry'
+#
+# Some information specific to po-debconf are available at
+# /usr/share/doc/po-debconf/README-trans
+# or http://www.debian.org/intl/l10n/po-debconf/README-trans
+#
+# Developers do not need to manually edit POT or PO files.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: mdadm\n"
+"Report-Msgid-Bugs-To: pkg-mdadm-devel@lists.alioth.debian.org\n"
+"POT-Creation-Date: 2006-07-07 17:30+0200\n"
+"PO-Revision-Date: 2006-06-30 01:06-0300\n"
+"Last-Translator: Felipe Augusto van de Wiel (faw) <felipe@cathedrallabs."
+"org>\n"
+"Language-Team: l10n portuguese <debian-l10n-portuguese@lists.debian.org>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"pt_BR utf-8\n"
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1001
+msgid "Initialise the superblock if you reuse hard disks"
+msgstr "Inicialize o superbloco caso você reutilize discos rígidos"
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1002
+msgid ""
+"WARNING! If you are using hard disks which have RAID superblocks from "
+"earlier installations in different RAID arrays, you MUST zero each "
+"superblock *before* activating the autostart feature."
+msgstr ""
+"AVISO! Se você estiver usando discos rígidos que já contêm superblocos RAID "
+"de instalações anteriores em \"arrays\" RAID diferentes, você DEVE zerar o "
+"superbloco *antes* de ativar o recurso de \"autostart\"."
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1003
+msgid ""
+"To do this, do not start the RAID devices automatically. First, zero the "
+"superblock (mdadm --zero-superblock /dev/mdX). Next, use `dpkg-reconfigure "
+"mdadm` to reactivate the autostart feature."
+msgstr ""
+"Para fazê-lo, não inicie os dispositivos RAID automaticamente. Primeiro, "
+"zere os superblocos (mdadm --zero-superblock /dev/mdX). Em seguida, use "
+"`dpkg-reconfigure mdadm` para reativar o recurso de \"autostart\"."
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1004
+msgid ""
+"If you manage your RAIDs otherwise (e.g. EVMS), either disable autostart, or "
+"ensure that /etc/mdadm/mdadm.conf only lists those arrays you want to start "
+"by mdadm."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2001
+msgid "RAID arrays needed for the root filesystem:"
+msgstr "\"Arrays\" RAID necessários para o sistema de arquivos raiz:"
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2002
+msgid ""
+"If your system has its root filesystem on a RAID volume, it needs to be "
+"started early during the boot sequence. If your root filesystem is on a "
+"logical volume (LVM), which is on RAID, all constituent arrays need to be "
+"started."
+msgstr ""
+"Se o seu sistema tem o sistema de arquivos raiz em um volume RAID, este "
+"precisa ser iniciado mais cedo durante a seqüência de inicialização. Se o "
+"sistema de arquivos raiz está em um volume lógico (LVM), que está em um "
+"RAID, todos os \"arrays\" que o compõem precisam ser iniciados."
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2003
+msgid ""
+"If you know exactly which arrays are needed to bring up the root filesystem, "
+"enter them here. Alternatively, enter 'all' to simply start all available "
+"arrays. If you do not need or want to start any arrays for the root "
+"filesystem, leave the answer blank (or enter 'none')."
+msgstr ""
+"Se você sabe exatamente quais \"arrays\" são necessários para ativar o "
+"sistema de arquivos raiz, informe-os aqui. Como alternativa, informe 'all' "
+"para simplesmente iniciar todos os \"arrays\" disponíveis. Se você não "
+"precisa ou não quer iniciar qualquer \"array\" para o sistema de arquivos "
+"raiz, deixe a resposta em branco (ou informe 'none')."
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2004
+msgid ""
+"You have the option to start all other arrays (those not needed for the root "
+"filesystem) later in the boot sequence. Doing so will give you greater "
+"control over the arrays with the mdadm configuration file. Starting all "
+"arrays at boot-time may be safer though."
+msgstr ""
+"Você tem a opção de iniciar todos os \"arrays\" (aqueles que não são "
+"necessários pelo sistema de arquivos raiz) posteriormente na seqüência de "
+"inicialização. Fazendo isto, você terá um controle maior sobre os \"arrays\" "
+"com o arquivo de configuração mdadm. No entanto, iniciar todos os \"arrays\" "
+"durante a inicialização pode ser mais seguro."
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2005
+msgid ""
+"Please enter a space-separated list of devices, or 'all'. You may omit the "
+"leading '/dev/' and just enter e.g. \"md0 md1\"."
+msgstr ""
+"Por favor, informe uma lista, separada por espaços, de dispositivos, ou "
+"'all'. Você pode omitir a parte inicial '/dev/' e apenas informar, por "
+"exemplo, \"md0 md1\"."
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:3001
+msgid "Do you want to start RAID devices automatically?"
+msgstr "Você deseja iniciar os dispositivos RAID automaticamente?"
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:3002
+#, fuzzy
+msgid ""
+"Once the base system has come up, mdadm can start all RAID devices specified "
+"in /etc/mdadm/mdadm.conf, which have not yet been started. Unless you have "
+"compiled RAID support into the kernel (in which case all RAID arrays with "
+"partitions of type 0xfd (and only those) are started automatically anyway), "
+"this is probably what you want."
+msgstr ""
+"Uma vez que o sistema básico esteja disponível, mdadm pode iniciar todos os "
+"dispositivos RAID que encontrar e que ainda não tenham sido iniciados. A "
+"menos que você tenha compilado o suporte RAID dentro do kernel (neste caso "
+"todos os \"arrays\" RAID serão automaticamente iniciados de qualquer forma), "
+"esta é provavelmente uma opção desejável."
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4001
+msgid "Should mdadm run monthly parity checks of the RAID arrays?"
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4002
+msgid ""
+"If your kernel supports it (>> 2.6.14), mdadm can periodically check the "
+"parity of your RAID devices. This may be a resource-intensive process, "
+"depending on your setup, but it could help prevent rare cases of data loss."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4003
+msgid ""
+"The default, if turned on, is to run the checks on the first Sunday of every "
+"month at 01:06 o'clock."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:5001
+msgid "Do you want to start the RAID monitoring daemon?"
+msgstr "Você deseja iniciar o \"daemon\" de monitoramento RAID?"
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:5002
+msgid ""
+"The RAID monitor daemon sends email notifications in response to important "
+"RAID events (such as a disk failure). You probably want to enable it."
+msgstr ""
+"O \"daemon\" de monitoramento RAID envia e-mails de notificações em resposta "
+"a eventos RAID importantes (como uma falha de disco). Você provavelmente "
+"quer habilitar esta opção."
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:6001
+msgid "Recipient for email notifications:"
+msgstr "Destinatário para os e-mails de notificações:"
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:6002
+msgid ""
+"Please enter the email address of the user who should get the email "
+"notification for these important RAID events."
+msgstr ""
+"Por favor, informe o endereço de e-mail do usuário que deverá receber os e-"
+"mails de notificações para estes eventos RAID importantes."
+
+#~ msgid ""
+#~ "If RAID devices are started automatically, all RAID devices are "
+#~ "discovered and assembled automatically at system startup. This option "
+#~ "should only be used if the md driver is compiled as a module. If it is "
+#~ "compiled into your kernel, the automatic startup will be performed at "
+#~ "boot time by the kernel and therefore you should not choose this option."
+#~ msgstr ""
+#~ "Caso os dispositivos RAID sejam iniciados automaticamente, todos os "
+#~ "dispositivos RAID serão detectados e montados automaticamente na "
+#~ "inicialização do sistema operacional. Esta opção deverá ser usada somente "
+#~ "caso o driver md esteja compilado como módulo. Caso o mesmo esteja "
+#~ "compilado embutido em seu kernel, a inicialização automática será "
+#~ "executada em tempo de inicialização pelo próprio kernel e, portanto, você "
+#~ "não deverá e nem precisará escolher esta opção."
+
+#~ msgid ""
+#~ "When the RAID monitor daemon runs, email notifications are sent when a "
+#~ "disk belonging to a RAID array fails or changes its status for some "
+#~ "reason."
+#~ msgstr ""
+#~ "Quando o daemon monitorador RAID é executado, notificações via e-mail são "
+#~ "enviadas quando um disco pertencente a uma array RAID falha ou muda seu "
+#~ "status por qualquer razão."
+
+#~ msgid "Which user should get the email notification?"
+#~ msgstr "Qual usuário deve receber o e-mail de notificação?"
diff --git a/debian/po/ru.po b/debian/po/ru.po
new file mode 100644
index 00000000..cff6e2ef
--- /dev/null
+++ b/debian/po/ru.po
@@ -0,0 +1,204 @@
+# translation of ru.po to Russian
+# translation of mdadm_1.12.0-1_ru.po to Russian
+#
+# Translators, if you are not familiar with the PO format, gettext
+# documentation is worth reading, especially sections dedicated to
+# this format, e.g. by running:
+# info -n '(gettext)PO Files'
+# info -n '(gettext)Header Entry'
+# Some information specific to po-debconf are available at
+# /usr/share/doc/po-debconf/README-trans
+# or http://www.debian.org/intl/l10n/po-debconf/README-trans
+# Developers do not need to manually edit POT or PO files.
+# Yuri Kozlov <kozlov.y@gmail.com>, 2006.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: mdadm_1.12.0-1_ru\n"
+"Report-Msgid-Bugs-To: pkg-mdadm-devel@lists.alioth.debian.org\n"
+"POT-Creation-Date: 2006-07-07 17:30+0200\n"
+"PO-Revision-Date: 2006-06-30 22:29+0300\n"
+"Last-Translator: Yuri Kozlov <kozlov.y@gmail.com>\n"
+"Language-Team: Russian <debian-l10n-russian@lists.debian.org>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"X-Generator: KBabel 1.9.1\n"
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1001
+msgid "Initialise the superblock if you reuse hard disks"
+msgstr "Инициализация суперблока, если диски уже использовались"
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1002
+msgid ""
+"WARNING! If you are using hard disks which have RAID superblocks from "
+"earlier installations in different RAID arrays, you MUST zero each "
+"superblock *before* activating the autostart feature."
+msgstr ""
+"ПРЕДУПРЕЖДЕНИЕ! Если вы используете диски, на которых есть RAID суперблоки "
+"от предыдущих установок других RAID массивов, вы ДОЛЖНЫ обнулить каждый "
+"суперблок *перед* тем как активировать возможность автостарта."
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1003
+msgid ""
+"To do this, do not start the RAID devices automatically. First, zero the "
+"superblock (mdadm --zero-superblock /dev/mdX). Next, use `dpkg-reconfigure "
+"mdadm` to reactivate the autostart feature."
+msgstr ""
+"Для этого сначала выключите автоматический запуск RAID устройств. Потом, "
+"обнулите суперблок (mdadm --zero-superblock /dev/mdX) и заново включите "
+"автозапуск с помощью команды `dpkg-reconfigure mdadm`."
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1004
+msgid ""
+"If you manage your RAIDs otherwise (e.g. EVMS), either disable autostart, or "
+"ensure that /etc/mdadm/mdadm.conf only lists those arrays you want to start "
+"by mdadm."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2001
+msgid "RAID arrays needed for the root filesystem:"
+msgstr "RAID массивы, нужные для корневой файловой системы:"
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2002
+msgid ""
+"If your system has its root filesystem on a RAID volume, it needs to be "
+"started early during the boot sequence. If your root filesystem is on a "
+"logical volume (LVM), which is on RAID, all constituent arrays need to be "
+"started."
+msgstr ""
+"Если в системе корневая файловая система расположена на томе RAID, он должен "
+"быть запущен в самом начале процесса загрузки. Если корневая файловая "
+"система расположена на логическом томе (LVM), который расположен на RAID, то "
+"все компоненты массивов должны быть запущены."
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2003
+msgid ""
+"If you know exactly which arrays are needed to bring up the root filesystem, "
+"enter them here. Alternatively, enter 'all' to simply start all available "
+"arrays. If you do not need or want to start any arrays for the root "
+"filesystem, leave the answer blank (or enter 'none')."
+msgstr ""
+"Если вы точно знаете какие массивы требуются для получения корневой файловой "
+"системы, введите их здесь. Иначе, введите слово 'all', чтобы просто "
+"запустить все доступные массивы. Если вам это не нужно или вы не хотите "
+"запускать массивы для корневой файловой системы, оставьте это поле пустым "
+"(или введите слово 'none')."
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2004
+msgid ""
+"You have the option to start all other arrays (those not needed for the root "
+"filesystem) later in the boot sequence. Doing so will give you greater "
+"control over the arrays with the mdadm configuration file. Starting all "
+"arrays at boot-time may be safer though."
+msgstr ""
+"Вы можете указать все остальные запускаемые массивы (те, которые не "
+"требуются для корневой файловой системы) позже в процессе загрузки. Такое "
+"разделение позволяет полностью управлять массивами с помощью "
+"конфигурационного файла mdadm. Хотя запуск всех массивов во время начальной "
+"загрузке может быть безопасным."
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2005
+msgid ""
+"Please enter a space-separated list of devices, or 'all'. You may omit the "
+"leading '/dev/' and just enter e.g. \"md0 md1\"."
+msgstr ""
+"Введите список устройств через пробел или слово 'all'. Вы можете не "
+"указывать начальный путь типа '/dev/', а просто вводить имена устройств, "
+"например \"md0 md1\"."
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:3001
+msgid "Do you want to start RAID devices automatically?"
+msgstr "Запускать RAID устройства автоматически?"
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:3002
+#, fuzzy
+msgid ""
+"Once the base system has come up, mdadm can start all RAID devices specified "
+"in /etc/mdadm/mdadm.conf, which have not yet been started. Unless you have "
+"compiled RAID support into the kernel (in which case all RAID arrays with "
+"partitions of type 0xfd (and only those) are started automatically anyway), "
+"this is probably what you want."
+msgstr ""
+"После старта базовой системы mdadm может обнаружить и запустить все RAID "
+"устройства, которые ещё не были запущены. Этот параметр нужно включить, если "
+"вы не вкомпилировали поддержку RAID в ядро (в противном случае ядро само "
+"выполняет автоматический запуск RAID)."
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4001
+msgid "Should mdadm run monthly parity checks of the RAID arrays?"
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4002
+msgid ""
+"If your kernel supports it (>> 2.6.14), mdadm can periodically check the "
+"parity of your RAID devices. This may be a resource-intensive process, "
+"depending on your setup, but it could help prevent rare cases of data loss."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4003
+msgid ""
+"The default, if turned on, is to run the checks on the first Sunday of every "
+"month at 01:06 o'clock."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:5001
+msgid "Do you want to start the RAID monitoring daemon?"
+msgstr "Запускать демон-монитор RAID?"
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:5002
+msgid ""
+"The RAID monitor daemon sends email notifications in response to important "
+"RAID events (such as a disk failure). You probably want to enable it."
+msgstr ""
+"При запущенном демоне-мониторе RAID в случае отказа диска принадлежащего "
+"RAID или изменения его состояния по какой-либо причине, будет отправляться "
+"почтовое уведомление. Лучше это включить."
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:6001
+msgid "Recipient for email notifications:"
+msgstr "Получатель уведомительных писем:"
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:6002
+msgid ""
+"Please enter the email address of the user who should get the email "
+"notification for these important RAID events."
+msgstr ""
+"Введите адрес электронной почты пользователя, который будет получать "
+"почтовые уведомления о важных изменениях в состоянии RAID."
diff --git a/debian/po/sv.po b/debian/po/sv.po
new file mode 100644
index 00000000..5047497d
--- /dev/null
+++ b/debian/po/sv.po
@@ -0,0 +1,218 @@
+# Translators, if you are not familiar with the PO format, gettext
+# documentation is worth reading, especially sections dedicated to
+# this format, e.g. by running:
+# info -n '(gettext)PO Files'
+# info -n '(gettext)Header Entry'
+# Some information specific to po-debconf are available at
+# /usr/share/doc/po-debconf/README-trans
+# or http://www.debian.org/intl/l10n/po-debconf/README-trans
+# Developers do not need to manually edit POT or PO files.
+# , fuzzy
+#
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: mdadm 1.12.0-1\n"
+"Report-Msgid-Bugs-To: pkg-mdadm-devel@lists.alioth.debian.org\n"
+"POT-Creation-Date: 2006-07-07 17:30+0200\n"
+"PO-Revision-Date: 2006-06-29 22:06+0100\n"
+"Last-Translator: Daniel Nylander <po@danielnylander.se>\n"
+"Language-Team: Swedish <tp-sv@listor.tp-sv.se>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=iso-8859-1\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1001
+msgid "Initialise the superblock if you reuse hard disks"
+msgstr "Initiera superblocket om du ?teranv?nder h?rddiskar"
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1002
+msgid ""
+"WARNING! If you are using hard disks which have RAID superblocks from "
+"earlier installations in different RAID arrays, you MUST zero each "
+"superblock *before* activating the autostart feature."
+msgstr ""
+"VARNING! Om du anv?nder h?rddiskar som har RAID-superblock fr?n tidigare "
+"installationer i andra RAID-kedjor M?STE du nollst?lla superblocket *innan* "
+"du aktiverar autostartfunktionen."
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1003
+msgid ""
+"To do this, do not start the RAID devices automatically. First, zero the "
+"superblock (mdadm --zero-superblock /dev/mdX). Next, use `dpkg-reconfigure "
+"mdadm` to reactivate the autostart feature."
+msgstr ""
+"F?r att g?ra detta, starta inte RAID-enheterna automatiskt. Nollst?ll f?rst "
+"superblocket (mdadm --zero-superblock /dev/mdX), anv?nd sedan \"dpkg-"
+"reconfigure mdadm\" f?r att ?teraktivera autostartfunktionen."
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1004
+msgid ""
+"If you manage your RAIDs otherwise (e.g. EVMS), either disable autostart, or "
+"ensure that /etc/mdadm/mdadm.conf only lists those arrays you want to start "
+"by mdadm."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2001
+msgid "RAID arrays needed for the root filesystem:"
+msgstr "RAID-kedjor som beh?vs f?r rotfilsystemet:"
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2002
+msgid ""
+"If your system has its root filesystem on a RAID volume, it needs to be "
+"started early during the boot sequence. If your root filesystem is on a "
+"logical volume (LVM), which is on RAID, all constituent arrays need to be "
+"started."
+msgstr ""
+"Om ditt system har sitt rotfilsystem p? en RAID-volym beh?ver den startas "
+"upp tidigt i uppstartssekvensen. Om ditt rotfilsystem finns p? en logisk "
+"volym (LVM), vilket ?r p? RAID beh?ver alla best?ende kedjor att startas."
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2003
+msgid ""
+"If you know exactly which arrays are needed to bring up the root filesystem, "
+"enter them here. Alternatively, enter 'all' to simply start all available "
+"arrays. If you do not need or want to start any arrays for the root "
+"filesystem, leave the answer blank (or enter 'none')."
+msgstr ""
+"Om du vet exakt vilka kedjor som beh?vs f?r att ta upp rotfilsystemet, ange "
+"dem h?r. Alternativt, ange \"all\" f?r att helt enkelt starta alla tillg?"
+"ngliga kedjor. Om du inte beh?ver eller vill starta n?gra kedjor f?r "
+"rotfilsystemet, l?mna svaret blankt h?r (eller ange \"none\")."
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2004
+msgid ""
+"You have the option to start all other arrays (those not needed for the root "
+"filesystem) later in the boot sequence. Doing so will give you greater "
+"control over the arrays with the mdadm configuration file. Starting all "
+"arrays at boot-time may be safer though."
+msgstr ""
+"Du har alternativet att starta alla andra kedjor (de som inte beh?vs f?r "
+"rotfilsystemet) senare i uppstartssekvensen. Att g?ra det ger dig st?rre "
+"kontroll ?ver kedjorna med mdadms konfigurationsfil. Starta alla kedjor vid "
+"uppstart kan dock vara s?krare."
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2005
+msgid ""
+"Please enter a space-separated list of devices, or 'all'. You may omit the "
+"leading '/dev/' and just enter e.g. \"md0 md1\"."
+msgstr ""
+"Ange en blankstegsseparerad lista p? enheter, eller \"all\". Du kan utesluta "
+"den inledande \"/dev/\" och bara ange t.ex. \"md0 md1\"."
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:3001
+msgid "Do you want to start RAID devices automatically?"
+msgstr "Vill du starta RAID-enheter automatiskt?"
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:3002
+#, fuzzy
+msgid ""
+"Once the base system has come up, mdadm can start all RAID devices specified "
+"in /etc/mdadm/mdadm.conf, which have not yet been started. Unless you have "
+"compiled RAID support into the kernel (in which case all RAID arrays with "
+"partitions of type 0xfd (and only those) are started automatically anyway), "
+"this is probably what you want."
+msgstr ""
+"N?r grundsystemet har kommit upp kan mdadm starta alla RAID-enheter som den "
+"kan hitta och som inte har startats. S?vida du har byggt in RAID-st?d i k?"
+"rnan (vilket inneb?r att alla RAID-kedjor startas upp automatiskt), detta ?r "
+"antagligen n?got du vill g?ra."
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4001
+msgid "Should mdadm run monthly parity checks of the RAID arrays?"
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4002
+msgid ""
+"If your kernel supports it (>> 2.6.14), mdadm can periodically check the "
+"parity of your RAID devices. This may be a resource-intensive process, "
+"depending on your setup, but it could help prevent rare cases of data loss."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4003
+msgid ""
+"The default, if turned on, is to run the checks on the first Sunday of every "
+"month at 01:06 o'clock."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:5001
+msgid "Do you want to start the RAID monitoring daemon?"
+msgstr "Vill du starta RAID-?vervakningsdemonen?"
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:5002
+msgid ""
+"The RAID monitor daemon sends email notifications in response to important "
+"RAID events (such as a disk failure). You probably want to enable it."
+msgstr ""
+"RAID-?vervakningsdemonen skickar e-postnotifieringar f?r viktiga RAID-h?"
+"ndelser (s?som ett diskfel). Du vill antagligen aktivera denna funktion."
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:6001
+msgid "Recipient for email notifications:"
+msgstr "Mottagare f?r e-postnotifieringar:"
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:6002
+msgid ""
+"Please enter the email address of the user who should get the email "
+"notification for these important RAID events."
+msgstr ""
+"Ange e-postadressen till den anv?ndare som ska ta emot e-postnotifieringar f?"
+"r dessa viktiga RAID-h?ndelser."
+
+#~ msgid ""
+#~ "If RAID devices are started automatically, all RAID devices are "
+#~ "discovered and assembled automatically at system startup. This option "
+#~ "should only be used if the md driver is compiled as a module. If it is "
+#~ "compiled into your kernel, the automatic startup will be performed at "
+#~ "boot time by the kernel and therefore you should not choose this option."
+#~ msgstr ""
+#~ "Om RAID-enheter startas automatiskt kommer alla RAID-enheter att uppt?"
+#~ "ckas och s?ttas ihop automatiskt vid systemets uppstart. Denna funktion b?"
+#~ "r bara anv?ndas om md-drivaren ?r byggd som en modul. Om den ?r inbyggd i "
+#~ "din k?rna kommer den automatiska uppstarten att g?ras av k?rnan vid "
+#~ "systemets uppstart och d?rf?r b?r du inte v?lja denna funktion h?r."
+
+#~ msgid ""
+#~ "When the RAID monitor daemon runs, email notifications are sent when a "
+#~ "disk belonging to a RAID array fails or changes its status for some "
+#~ "reason."
+#~ msgstr ""
+#~ "N?r RAID-?vervakningsdaemonen k?r kommer e-postnotifieringar att skickas "
+#~ "n?r en disk som tillh?r en RAID-array fallerar eller ?ndrar status av n?"
+#~ "gon anledning."
diff --git a/debian/po/templates.pot b/debian/po/templates.pot
new file mode 100644
index 00000000..e9d5d10d
--- /dev/null
+++ b/debian/po/templates.pot
@@ -0,0 +1,162 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
+# This file is distributed under the same license as the PACKAGE package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: PACKAGE VERSION\n"
+"Report-Msgid-Bugs-To: pkg-mdadm-devel@lists.alioth.debian.org\n"
+"POT-Creation-Date: 2006-07-07 17:30+0200\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
+"Language-Team: LANGUAGE <LL@li.org>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=CHARSET\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1001
+msgid "Initialise the superblock if you reuse hard disks"
+msgstr ""
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1002
+msgid ""
+"WARNING! If you are using hard disks which have RAID superblocks from "
+"earlier installations in different RAID arrays, you MUST zero each "
+"superblock *before* activating the autostart feature."
+msgstr ""
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1003
+msgid ""
+"To do this, do not start the RAID devices automatically. First, zero the "
+"superblock (mdadm --zero-superblock /dev/mdX). Next, use `dpkg-reconfigure "
+"mdadm` to reactivate the autostart feature."
+msgstr ""
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1004
+msgid ""
+"If you manage your RAIDs otherwise (e.g. EVMS), either disable autostart, or "
+"ensure that /etc/mdadm/mdadm.conf only lists those arrays you want to start "
+"by mdadm."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2001
+msgid "RAID arrays needed for the root filesystem:"
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2002
+msgid ""
+"If your system has its root filesystem on a RAID volume, it needs to be "
+"started early during the boot sequence. If your root filesystem is on a "
+"logical volume (LVM), which is on RAID, all constituent arrays need to be "
+"started."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2003
+msgid ""
+"If you know exactly which arrays are needed to bring up the root filesystem, "
+"enter them here. Alternatively, enter 'all' to simply start all available "
+"arrays. If you do not need or want to start any arrays for the root "
+"filesystem, leave the answer blank (or enter 'none')."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2004
+msgid ""
+"You have the option to start all other arrays (those not needed for the root "
+"filesystem) later in the boot sequence. Doing so will give you greater "
+"control over the arrays with the mdadm configuration file. Starting all "
+"arrays at boot-time may be safer though."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2005
+msgid ""
+"Please enter a space-separated list of devices, or 'all'. You may omit the "
+"leading '/dev/' and just enter e.g. \"md0 md1\"."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:3001
+msgid "Do you want to start RAID devices automatically?"
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:3002
+msgid ""
+"Once the base system has come up, mdadm can start all RAID devices specified "
+"in /etc/mdadm/mdadm.conf, which have not yet been started. Unless you have "
+"compiled RAID support into the kernel (in which case all RAID arrays with "
+"partitions of type 0xfd (and only those) are started automatically anyway), "
+"this is probably what you want."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4001
+msgid "Should mdadm run monthly parity checks of the RAID arrays?"
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4002
+msgid ""
+"If your kernel supports it (>> 2.6.14), mdadm can periodically check the "
+"parity of your RAID devices. This may be a resource-intensive process, "
+"depending on your setup, but it could help prevent rare cases of data loss."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4003
+msgid ""
+"The default, if turned on, is to run the checks on the first Sunday of every "
+"month at 01:06 o'clock."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:5001
+msgid "Do you want to start the RAID monitoring daemon?"
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:5002
+msgid ""
+"The RAID monitor daemon sends email notifications in response to important "
+"RAID events (such as a disk failure). You probably want to enable it."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:6001
+msgid "Recipient for email notifications:"
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:6002
+msgid ""
+"Please enter the email address of the user who should get the email "
+"notification for these important RAID events."
+msgstr ""
diff --git a/debian/po/vi.po b/debian/po/vi.po
new file mode 100644
index 00000000..4711ed1e
--- /dev/null
+++ b/debian/po/vi.po
@@ -0,0 +1,197 @@
+# Vietnamese Translation for mdadm.
+# Copyright © 2005 Free Software Foundation, Inc.
+# Clytie Siddall <clytie@riverland.net.au>, 2005.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: mdadm 1.12.0-1\n"
+"Report-Msgid-Bugs-To: pkg-mdadm-devel@lists.alioth.debian.org\n"
+"POT-Creation-Date: 2006-07-07 17:30+0200\n"
+"PO-Revision-Date: 2005-08-19 21:55+0930\n"
+"Last-Translator: Clytie Siddall <clytie@riverland.net.au>\n"
+"Language-Team: Vietnamese <gnomevi-list@lists.sourceforge.net>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=1; plural=0\n"
+"X-Generator: LocFactoryEditor 1.2.2\n"
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1001
+msgid "Initialise the superblock if you reuse hard disks"
+msgstr "Hãy khởi động siêu khối nếu bạn sử dụng lại đĩa cứng."
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1002
+#, fuzzy
+msgid ""
+"WARNING! If you are using hard disks which have RAID superblocks from "
+"earlier installations in different RAID arrays, you MUST zero each "
+"superblock *before* activating the autostart feature."
+msgstr ""
+"CẢNH BÁO: nếu bạn đang sử dụng đĩa cứng nào có một siêu khối md từ một việc "
+"cài đặt trước trong một mảng RAID khác, thì bạn PHẢI định dạng siêu khối ấy "
+"trước khi hoạt hóa tính năng tự động khởi động."
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1003
+msgid ""
+"To do this, do not start the RAID devices automatically. First, zero the "
+"superblock (mdadm --zero-superblock /dev/mdX). Next, use `dpkg-reconfigure "
+"mdadm` to reactivate the autostart feature."
+msgstr ""
+"Để làm như thế, đừng tự động khởi động những thiết bị RAID. Hãy định dạng "
+"siêu khối (« mdadm --zero-superblock /dev/mdX »). Sau đó, bạn có thể sử dụng "
+"lệnh « dpkg-reconfigure mdadm » để hoạt hóa tính năng tự động khởi động."
+
+#. Type: note
+#. Description
+#: ../mdadm.templates:1004
+msgid ""
+"If you manage your RAIDs otherwise (e.g. EVMS), either disable autostart, or "
+"ensure that /etc/mdadm/mdadm.conf only lists those arrays you want to start "
+"by mdadm."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2001
+msgid "RAID arrays needed for the root filesystem:"
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2002
+msgid ""
+"If your system has its root filesystem on a RAID volume, it needs to be "
+"started early during the boot sequence. If your root filesystem is on a "
+"logical volume (LVM), which is on RAID, all constituent arrays need to be "
+"started."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2003
+msgid ""
+"If you know exactly which arrays are needed to bring up the root filesystem, "
+"enter them here. Alternatively, enter 'all' to simply start all available "
+"arrays. If you do not need or want to start any arrays for the root "
+"filesystem, leave the answer blank (or enter 'none')."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2004
+msgid ""
+"You have the option to start all other arrays (those not needed for the root "
+"filesystem) later in the boot sequence. Doing so will give you greater "
+"control over the arrays with the mdadm configuration file. Starting all "
+"arrays at boot-time may be safer though."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:2005
+msgid ""
+"Please enter a space-separated list of devices, or 'all'. You may omit the "
+"leading '/dev/' and just enter e.g. \"md0 md1\"."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:3001
+#, fuzzy
+msgid "Do you want to start RAID devices automatically?"
+msgstr "Bạn có muốn tự động khởi động những thiết bị RAID không?"
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:3002
+msgid ""
+"Once the base system has come up, mdadm can start all RAID devices specified "
+"in /etc/mdadm/mdadm.conf, which have not yet been started. Unless you have "
+"compiled RAID support into the kernel (in which case all RAID arrays with "
+"partitions of type 0xfd (and only those) are started automatically anyway), "
+"this is probably what you want."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4001
+msgid "Should mdadm run monthly parity checks of the RAID arrays?"
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4002
+msgid ""
+"If your kernel supports it (>> 2.6.14), mdadm can periodically check the "
+"parity of your RAID devices. This may be a resource-intensive process, "
+"depending on your setup, but it could help prevent rare cases of data loss."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:4003
+msgid ""
+"The default, if turned on, is to run the checks on the first Sunday of every "
+"month at 01:06 o'clock."
+msgstr ""
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:5001
+#, fuzzy
+msgid "Do you want to start the RAID monitoring daemon?"
+msgstr "Bạn có muốn khởi chạy trình nền theo dõi RAID không?"
+
+#. Type: boolean
+#. Description
+#: ../mdadm.templates:5002
+msgid ""
+"The RAID monitor daemon sends email notifications in response to important "
+"RAID events (such as a disk failure). You probably want to enable it."
+msgstr ""
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:6001
+#, fuzzy
+msgid "Recipient for email notifications:"
+msgstr "Người nhận thư thông báo từ trình nền:"
+
+#. Type: string
+#. Description
+#: ../mdadm.templates:6002
+#, fuzzy
+msgid ""
+"Please enter the email address of the user who should get the email "
+"notification for these important RAID events."
+msgstr ""
+"Hãy nhập địa chỉ thư điện tử của người dùng nên nhận thư thông báo họ nếu "
+"trạng thái của một mảng RAID có thay đổi."
+
+#~ msgid ""
+#~ "If RAID devices are started automatically, all RAID devices are "
+#~ "discovered and assembled automatically at system startup. This option "
+#~ "should only be used if the md driver is compiled as a module. If it is "
+#~ "compiled into your kernel, the automatic startup will be performed at "
+#~ "boot time by the kernel and therefore you should not choose this option."
+#~ msgstr ""
+#~ "Nếu bạn chọn tự động khởi động thiết bị RAID, khi hệ thống khởi động, nó "
+#~ "tự động phát hiện và tập hợp các thiết bị RAID. Bạn chỉ nên sử dụng tùy "
+#~ "chọn này nếu đã biên dịch trình hỗ trợ thiết bị md là một mô-đun. Nếu đã "
+#~ "biên dịch nó vào hạt nhân bạn, thì hạt nhân sẽ tự động khởi động, vì vậy "
+#~ "bạn không nên chọn tùy chọn này."
+
+#~ msgid ""
+#~ "When the RAID monitor daemon runs, email notifications are sent when a "
+#~ "disk belonging to a RAID array fails or changes its status for some "
+#~ "reason."
+#~ msgstr ""
+#~ "Khi trình nền (dæmon) theo dõi RAID chạy thì nó gửi thông báo qua thư mỗi "
+#~ "lúc một đĩa thuộc về một mảng RAID có hỏng hóc hoặc thay đổi trạng thái "
+#~ "vì một lý do nào đó."
diff --git a/debian/rootraiddoc.97.html b/debian/rootraiddoc.97.html
new file mode 100644
index 00000000..a08e73db
--- /dev/null
+++ b/debian/rootraiddoc.97.html
@@ -0,0 +1,1333 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+ <html>
+ <head>
+ <title>Convert Root System to Bootable Software RAID1 (Debian)</title>
+ <style type="text/css">
+body {
+ color: black;
+ background-color: #ffffcc;
+ margin: 0% 6% 6%;
+ }
+h1 {
+ color: green;
+ margin: 3% 0% 2% -3%;
+ }
+h2 {
+ margin: 4% 0% 1% -3%;
+ font-weight: normal;
+ }
+.toc {
+ padding: 0% 6% 6%;
+ border: solid #cccccc 1px;
+ }
+div.up {
+ cursor: pointer;
+ cursor: hand;
+ margin: 6% 0% 0% 0%;
+ padding: 4% 0% 0%;
+ border-bottom: solid #cccccc 1px;
+ }
+a.up {
+ color: #cccccc;
+ text-decoration: none;
+ }
+.reboot {
+ color: red;
+ background-color: white;
+ }
+.code {
+ font-family: monospace;
+ margin: 0% 1% 2% 2%;
+ }
+.todo {
+ color: green;
+ background-color: yellow;
+ padding: 1%;
+ }
+.note {
+ border: dashed gray 1px;
+ padding: 1%;
+ margin: 0% 0% 2% 0%;
+ }
+p {
+ margin: 1% 0%;
+ }
+ </style>
+ </head>
+ <body>
+
+
+
+
+
+
+ <!-- Header -->
+
+ <div style="background-color: #ffffcc; color: green; padding: 1%; border: double #66cc99 4px; margin: 2% 0%;">
+ <h1 style="margin: 1% 0%;">
+Convert Root System to Bootable Software RAID1 (Debian)</h1>
+ <p>
+How to convert a Debian system to bootable Software RAID 1 with a second hard drive, 'mdadm' and a few standard UNIX tools</p>
+ </div>
+
+ <p style="font-size: 80%">
+Version 0.97 (2004-06-03) Lucas Albers -- admin At cs DOT montana dot edu and Roger Chrisman <br />
+Home of most recent version: <a href="http://alioth.debian.org/projects/rootraiddoc" target="_blank">
+http://alioth.debian.org/projects/rootraiddoc</a><br />
+Thanks to: Alvin Olga, Era Eriksson, Yazz D. Atlas, James Bromberger, Timothy F Nagy, and alioth.debian.org</p>
+ <p><b>
+WARNING: No warranty of any kind. Proceed at your own risk.</b> A typo, especially in lilo.conf, can leave your system unbootable. <b>Back-up data and make a boot floppy <i>before starting this procedure</i>.</b></p>
+ </div>
+
+ <!-- Table of Contents -->
+ <div id="TOC" class="toc">
+
+ <h1>Table of Contents</h1>
+
+ <h3><a href="#summary">
+Summary</a></h3>
+
+ <h3><a href="#1">
+Procedure</a></h3>
+
+ <ol>
+ <li style="margin: 1% 0%;"><a href="#1">
+Install Debian</a><br />
+on your Primary Master disk -- hda. Or if you already have Debian installed, go to step 2.</li>
+ <li style="margin: 1% 0%;"><a href="#2">
+Upgrade to RAID savvy Kernel</a><br />
+and install 'mdadm'.</li>
+ <li style="margin: 1% 0%;"><a href="#3">
+Setup RAID 1</a><br />
+declaring disk-one 'missing' and disk-two hdc.</li>
+ <li style="margin: 1% 0%;"><a href="#4">
+Copy your Debian system</a><br />
+from hda to /dev/md0 ('missing' + 'hdc').</li>
+ <li style="margin: 1% 0%;"><a href="#5">
+Reboot to RAID device.</a><br /></li>
+ <li style="margin: 1% 0%;"><a href="#6">
+Reformat hda as 'fd' and declare it as disk-one of your RAID,</a><br />
+and watch the booted RAID system automatically mirror itself onto the new drive. Done.</li>
+ </ol>
+
+<h3>Alternate grub/initrd procedure</h3>
+<ol>
+ <li style="margin: 1% 0%;"><a href="#7">
+Part II. RAID using initrd and grub</a><br /> </li>
+</ol>
+
+ <h3><a href="#I">
+Appendix</a></h3>
+
+ <ol style="list-style-type: upper-roman;">
+ <li><a href="#I">
+RAID Introduction</a></li>
+ <li><a href="#II">
+Drive designators (hda, hdb, hdc, hdd), jumpers and cables</a></li>
+ <li><a href="#III">
+Setting up software RAID for multiple partitions</a></li>
+ <li><a href="#IV">
+Lilo</a></li>
+ <li><a href="#V">
+Copying Data</a></li>
+ <li><a href="#VI">
+Rebooting</a></li>
+ <li><a href="#VII">
+Initrd</a></li>
+ <li><a href="#VIII">
+Verify that system will boot even with one disk off-line</a></li>
+ <li><a href="#IX">
+Setting up a RAID 1 Swap device</a></li>
+ <li><a href="#X">
+Performance Optimizations</a></li>
+ <li><a href="#XI">
+Disaster Recovery</a></li>
+ <li><a href="#XII">
+Quick Reference</a></li>
+ <li><a href="#XIII">
+Troubleshooting </a></li>
+ <li><a href="#XIIII">
+Raid Disk Maintenance </a></li>
+ </ol>
+
+ <h3><a href="#references">
+References</a></h3>
+
+ </div>
+
+ <!-- Summary -->
+
+ <div id="summary" class="up" onMouseOver="status='^ up to Table of Contents';" onMouseOut="status=''" onClick="(location.hash == '#TOC')? location.reload(): location.hash = 'TOC'; return false;"><a class="up" href="#TOC">
+^</a></div>
+ <h1>
+Summary</h1>
+ <p>
+We begin with Debian installed on the Primary Master drive, hda (<a href="#1">step 1</a>). We need RAID support in our Kernel (<a href="#2">step 2</a>). We add another disk as Secondary Master, hdc, set it up for RAID (<a href="#3">step 3</a>), and copy Debian to it (<a href="#4">step 4</a>). Now we can reboot to the RAID device (<a href="#5">step 5</a>) and declare hda part of the RAID and it automatically syncs with hdc to complete our RAID 1 device (<a href="#6">step 6</a>).</p>
+ <p>
+If all goes well</p>
+ <ul>
+ <li>You do not need a rescue disk or to boot off anything except the hard drive.</li>
+ <li>You can do this operation completely remotely.</li>
+ <li>And you will not lose any data.</li>
+ </ul>
+ <p style="font-weight: bold; font-style: italic;">
+Use this HowTo at your own risk. We are not responsible for what happens!</p>
+ <p>
+First things first</p>
+ <ul>
+ <li>Backup your data.</li>
+ <li>Create a boot floppy.</li>
+ </ul>
+ <p>
+Whenever you change your partitions, you need to reboot! (If you know what you are
+doing, ignore this advice.)</p>
+ <p>
+I assume you will mess up a step so wherever possible, we include verification.</p>
+ <p>
+I use 'mdadm' because it is easier than 'raidtools' or 'raidtools2'.</p>
+ <p>
+<br>We now have grub and lilo directions, grub directions are still in beta form.
+<br>Read the grub directions, and comment on them.</p>
+
+
+
+
+ <!-- Procedure -->
+
+
+ <div id="1" class="up" onMouseOver="status='^ up to Table of Contents';" onMouseOut="status=''" onClick="(location.hash == '#TOC')? location.reload(): location.hash = 'TOC'; return false;"><a class="up" href="#TOC">
+^</a></div>
+ <h1 style="padding-bottom: 6%;">
+Procedure</h1>
+
+
+ <h1>
+1. Install Debian</h1>
+ <p>
+Do a fresh install the normal way on your first drive, hda (the Primary Master drive in your computer).
+Or, if you already have a running Debian system that you want to use on hda; skip ahead to step 2.
+If you need Debian installation instructions, see:</p>
+ <p>
+<a href="http://www.debian.org/releases/stable/installmanual" target="_blank">
+Debian Installation HowTo</a> &raquo; http://www.debian.org/releases/stable/installmanual</p>
+ <p>
+<a href="http://d-i.alioth.debian.org/manual/" target="_blank">
+Sarge Debian Installation HowTo</a> &raquo; http://d-i.alioth.debian.org/manual/</p>
+
+ <div id="2" class="up" onMouseOver="status='^ up to Table of Contents';" onMouseOut="status=''" onClick="(location.hash == '#TOC')? location.reload(): location.hash = 'TOC'; return false;"><a class="up" href="#TOC">
+^</a></div>
+ <h1>
+2. Upgrade to a RAID savvy Kernel</h1>
+
+
+ <h2>
+2.1 Compile and install a RAID savvy Kernel.</h2>
+ <p>
+RAID must be <i>compiled</i> into the Kernel, not added as a module, for you to boot from the RAID device (unless you use a RAID savvy initrd kernel or boot from a non-RAID boot drive; I now cover initrd methods!). You need RAID 1 but I usually include RAID 5, too. For step by step Kernel compile and install instructions, see:</p>
+ <p><a href="http://newbiedoc.sourceforge.net/system/kernel-pkg.html" target="_blank">
+Creating custom Kernels with Debian's kernel-package system</a> &raquo; http://newbiedoc.sourceforge.net/system/kernel-pkg.html</p>
+
+<br>
+
+ <h2>
+2.2 Verify your RAID savvy Kernel.</h2>
+ <p class="code">
+cat /proc/mdstat</p>
+ <p>
+(You should see the RAID "personalities" your Kernel supports.)</p>
+Something like this:
+
+<p class="code">
+Personalities : [linear] [raid0] [raid1] [raid5]
+read_ahead 1024 sectors
+md4 : active raid5 hdh4[3] hdg4[2] hdf4[1] hde4[0]
+ 356958720 blocks level 5, 64k chunk, algorithm 2 [4/4] [UUUU]
+
+unused devices: &lt;none&gt;
+ <p>
+
+<p class="code">
+YOU MUST VERIFY you have raid support via /proc/mdstat.
+This is the most important item to verify before going any farther.
+So the kernel has to support it or you have to load the modules in initrd.
+<p>
+
+
+(This will show you if raid is compiled into kernel, or detected as a module from initrd.)
+/etc/modules will not list RAID if Kernel has RAID compiled in instead of loaded as modules.
+ <br>
+ Use lsmod to list currently loaded modules, this will show raid modules loaded.
+ <p class="code">
+ reiserfs <br>
+raid1 <br>
+ext2 <br>
+ide-disk <br>
+raid5 <br>
+ext3 <br>
+ <p>
+cat /etc/modules</p>
+ <p>
+(IF YOU SEE ANY RAID LISTED IN /etc/modules, then you probably have your Kernel loading RAID via modules. That will prevent you from booting from your RAID device, unless you use initrd. To boot from your RAID device, unless you use a RAID savvy initrd, you need RAID <i>compiled into</i> Kernel, not added <i>as a module.</i>)</p>
+
+
+
+<h2> 2.3 Install 'mdadm':</h2>
+ <p class="code">
+apt-get install mdadm</p>
+<h2>2.4 List what IDE devices you have:</h2>
+<p class="code">
+ls /proc/ide</p>
+
+ <div id="3" class="up" onMouseOver="status='^ up to Table of Contents';" onMouseOut="status=''" onClick="(location.hash == '#TOC')? location.reload(): location.hash = 'TOC'; return false;"><a class="up" href="#TOC">
+^</a></div>
+ <h1>
+3. Setup RAID 1</h1>
+ <p>
+Setup RAID 1 and declare disk-one of your RAID to be 'missing' and disk-two of your RAID to be 'hdc'.</p>
+
+
+ <h2>
+3.1 Create RAID (fd) partition on hdc</h2>
+ <p>
+Warning: ALWAYS give the partition when editing with cfdisk. By default cfdisk will select the first disk in the system. I accidentally wiped the wrong partition with cfdisk, once.</p>
+ <p>
+Do A or B, either way will work:</p>
+ <p>
+A. Create partitions on new disk.</p>
+ <p class="code">
+cfdisk /dev/hdc</p>
+
+ <p>or</p>
+
+ <p>B. copy existing partitions to new disk with sfdisk.</p>
+ <p class="code">
+sfdisk -d /dev/hda | sfdisk /dev/hdc</p>
+
+<br>NOTE: On some disks you cannot copy over the partitions correctly using this method
+<br>It will detect the new partition as 0 size or a strange size.
+<br>You will need to manually create the partitions, making them the same size with cfdisk.
+<br>
+<h2>
+3.2 Create correct partition type signatures on new partition.</h2>
+ <p class="code">
+cfdisk /dev/hdc</p>
+
+ <ul>
+ <li>Select Type, then hit enter, then type 'fd' (this means RAID type partition).</li>
+ <li>Select Write</li>
+ <li>Select Quit.</li>
+ </ul>
+
+ <p class="code">
+<span class="reboot">reboot</span></p>
+ <p>
+(To verify that everything is working ok.)
+
+
+ <h2>
+3.3 Create RAID device</h2>
+ <p>
+that has two members and one of the members does not exist yet. md0 is the RAID partition we are creating, /dev/hdc1 is the initial partition. We will be adding /dev/hda1 back into the /dev/md0</p>
+ <p>
+RAID set after we boot into /dev/md0.</p>
+ <p class="code">
+mdadm --create /dev/md0 --level=1 --raid-disks=2 missing /dev/hdc1</p>
+ <p>
+If this gives errors then you need to zero the super block, see useful mdadm commands.</p>
+
+
+ <h2>
+3.4 Format RAID device </h2>
+ <p>
+You can use reiserfs or ext3 for this, both work, I use reiserfs for larger devices. Go with what you trust.</p>
+ <p class="code">
+mkfs.ext3 /dev/md0</p>
+ <p>
+or</p>
+ <p class="code">
+mkfs -t reiserfs /dev/md0</p>
+
+
+ <div id="4" class="up" onMouseOver="status='^ up to Table of Contents';" onMouseOut="status=''" onClick="(location.hash == '#TOC')? location.reload(): location.hash = 'TOC'; return false;"><a class="up" href="#TOC">
+^</a></div>
+ <h1>
+4. Copy your Debian system</h1>
+ <p>
+Copy your Debian system from hda to /dev/md0 ('missing' + 'hdc'). Then, check to
+make sure that the new RAID device is still setup right and can be mounted
+correctly. We do this with an entry in hda's /etc/fstab and a reboot. Note that
+by editing hda's /etc/fstab after the copy, instead of before, we leave the copy
+on md0 unaltered and only are editing hda's /etc/fstab. </p>
+
+
+ <p>
+<b>NB: THIS IS A BRANCH IN OUR SYSTEM CONFIGURATION (eg temporary!)</b>, but it
+will be overwritten later by the md0 version of /etc/fstab by the sync in step 6.</p>
+
+
+ <h2>
+4.1 Create a mount point. </h2>
+ <p class="code">
+mkdir /mnt/md0</p>
+
+
+ <h2>
+4.2 Mount your RAID device. </h2>
+ <p class="code">
+mount /dev/md0 /mnt/md0</p>
+
+
+ <h2>
+4.3 Copy your Debian system to RAID device. </h2>
+ <p class="code">
+cp -axu / /mnt/md0</p>
+Please refer to the Copying data section to verify you copied the data correctly.
+<br>See <a href="#V">Copying Data</a>
+ <p>
+You don't need the -u switch; it just tells cp not to copy the files again if they exist. If you are running the command a second time it will run faster with the -u switch.</p>
+
+
+ <h2>
+4.4 Edit /etc/fstab so that you mount your new RAID partition on boot up.</h2>
+<p>
+This verifies that you have the correct partition signatures on the partition and that your partition is correct. Sample Line in <span class="code">/etc/fstab</span>:</p>
+ <p class="code">
+/dev/md0 /mnt/md0 ext3 defaults 0 0</p>
+ <p>
+Then</p>
+ <p class="code">
+<span class="reboot">reboot</span></p>
+ <p>
+And see if the RAID partition comes up.</p>
+ <p class="code">
+mount</p>
+ <p>
+Should show /dev/md0 mounted on /mnt/md0.</p>
+
+
+ <div id="5" class="up" onMouseOver="status='^ up to Table of Contents';" onMouseOut="status=''" onClick="(location.hash == '#TOC')? location.reload(): location.hash = 'TOC'; return false;"><a class="up" href="#TOC">
+^</a></div>
+ <h1>
+5. Reboot to RAID device</h1>
+ <p>
+For step 5 reboot, we will tell Lilo that
+<ul>
+<li>as before, /boot and MBR are still on hda,<br />
+<li>and now we want root (/) to mount on md0.<br />
+</ul></p>
+ <p>
+We will, as before, be using hda's MBR (Master Boot Record is the first 512 bytes on a disk and is what the BIOS reads first in determining how to boot up a system) and hda's /boot dir (the kernel-image and some other stuff live here), but instead of mounting root (/) from hda, we will mount md0's root (/) (the root of our RAID device, currently running off of only hdc because we declared the first disk 'missing').
+
+
+ <h2>
+5.1 Configure Lilo to boot to the RAID device </h2>
+ <p>
+(Later we will configure Lilo to write the boot sector to the RAID boot device also, so we can still boot even if either disk fails.)</p>
+ <p>
+Add a stanza labeled 'RAID' to /etc/lilo.conf on hda1 so that we can boot with /dev/md0, our RAID device, as root (/):</p>
+ <p class="code">
+#the same boot drive as before.<br />
+boot=/dev/hda<br />
+image=/vmlinuz<br />
+label=RAID<br />
+read-only<br />
+#our new root partition.<br />
+root=/dev/md0</p>
+ <p>
+That makes an entry labeled 'RAID' specific to the RAID device, so you can still boot to /dev/hda if /dev/md0 does not work.</p>
+<p>Sample complete lilo.conf file:</p>
+ <p class="code">
+#sample working lilo.conf for raid.<br />
+#hda1,hdc1 are boot, hda2,hdc2 are swap<br />
+#hda3,hdc3 are the partition used by array<br />
+#root partition is /dev/md3 on / type reiserfs (rw)<br />
+#I named the raid volumes the same as the partition numbers<br />
+#this is the final lilo.conf file of a system completely finished,<br />
+#and booted into raid.<br />
+<br />
+<br />
+lba32<br />
+boot=/dev/md1<br />
+root=/dev/hda3<br />
+install=/boot/boot-menu.b<br />
+map=/boot/map<br />
+ prompt<br />
+ delay=50<br />
+ timeout=50<br />
+ vga=normal<br />
+ raid-extra-boot=/dev/hda,/dev/hdd<br />
+ default=RAID<br />
+ image=/boot/vmlinuz-RAID<br />
+ label=RAID<br />
+ read-only<br />
+ root=/dev/md3<br />
+ alias=1<br />
+
+ image=/vmlinuz<br />
+ label=Linux<br />
+ read-only<br />
+ alias=2<br />
+ <br />
+ image=/vmlinuz.old<br />
+ label=LinuxOLD<br />
+ read-only<br />
+ optional</p>
+
+
+ <h2>
+5.2 Test our new lilo.conf </h2>
+ <p class="code">
+lilo -t -v</p>
+ <p>
+(With a RAID installation, always run<span class="code"> lilo -t </span>first just to have Lilo tell you what it is about to do; use the<span class="code"> -v </span>flag, too, for verbose output.)</p>
+
+
+ <h2>
+5.3 Run Lilo </h2>
+ <p>
+Configure a one time Lilo boot via the<span class="code"> -R </span>flag and with a reboot with Kernel panic</p>
+ <p>
+The<span class="code"> -R &lt;boot-parameters-here&gt;</span> tells Lilo to only use the specified image for the next boot. So once you reboot it will revert to your old Kernel. </p>
+ <p>
+From 'man lilo':<br /><b>
+-R command line</b><br />
+This option sets the default command for the boot loader the next time it executes. The boot loader will then erase this line: this is a once-only command. It is typically used in reboot scripts, just before calling `shutdown -r'. Used without any arguments, it will cancel a lock-ed or fallback command line.</p>
+ <p> Before you can do the 'lilo -v -R RAID' command, you must first do a 'lilo' command to update the Lilo boot record with the contents of your new lilo.conf. Otherwise Lilo does not know what you mean by 'RAID' and you just get a 'Fatal: No image "RAID" is defined' error message when you do 'lilo -v -R RAID'. So,</p>
+ <p class="code">
+lilo<br />
+lilo -v -R RAID</p>
+
+
+ <h2>
+5.4 Edit /mnt/md0/etc/fstab and reboot</h2>
+ <p>
+to have /dev/md0 mount as root (/), when Lilo boots from our RAID device, /dev/md0.</p>
+ <p>
+Previous root (/) in fstab was:</p>
+ <p class="code">
+/dev/hda1 / reiserfs defaults 0 0</p>
+ <p>
+Edit it to:</p>
+ <p class="code">
+/dev/md0 / ext3 defaults 0 0</p>
+ <p>
+Note: edit /mnt/md0/etc/fstab, not /etc/fstab, because at the moment we are booted with hda1 as root (/) but we want to change the /etc/fstab that we currently have mounted on /mnt/md0/etc/fstab, our RAID device.</p>
+ <p>
+Reboot to check if system boots our RAID device, /dev/md0, as root (/). If it does not, just reboot again and you will come up with your previous boot partition courtesy of the<span class="code"> -R </span>flag in step 5.3 above.</p>
+ <p class="code">
+<span class="reboot">reboot</span></p>
+ <p>
+Verify /dev/md0 is mounted as root (/)</p>
+ <p class="code">
+mount</p>
+ <p>
+should show:</p>
+
+ <p class="code">
+/dev/md0 on / type reiserfs (rw)<br />
+proc on /proc type proc (rw)<br />
+devpts on /dev/pts type devpts (rw,gid=5,mode=620)</p>
+ <p>
+'type reiserfs' is just my example; you will see whatever your file system type is.</p>
+ <p>
+Now we are booted into the new RAID device -- md0 as root (/). Our RAID device only has one disk in it at the moment because we earlier declared the other disk as 'missing'. That was because we needed that other disk, hda, to install Debian on or because it was our pre-existing Debian system.</p>
+
+
+ <div id="6" class="up" onMouseOver="status='^ up to Table of Contents';" onMouseOut="status=''" onClick="(location.hash == '#TOC')? location.reload(): location.hash = 'TOC'; return false;"><a class="up" href="#TOC">
+^</a></div>
+ <h1>
+6. Reformat hda as 'fd' and declare it as disk-one of your RAID</h1>
+
+ <p>
+For step 6 reboots, we tell Lilo that
+<ul>
+<li>as in step 5 above, our root (/) is now on md0.</li>
+<li>and now, /boot is also on md0,</li>
+<li>and MBR is on both hda and hdc.</li>
+</ul></p>
+ <p>
+Here we not only use md0's root (/) as in step 5, but also md0's /boot (it contains an identical kernel-image to the one on hda because we copied it here from hda in step 4, but we will be overwriting everything on hda in step 6 and can't continue relying on the stuff on hda) and MBR from either hda or hdc, whichever the BIOS can find (they will be identical MBRs and the BIOS will still find hda's MBR but in case the hda disk were to fail down the road we would want the BIOS to look on hdc as a fail over so that it could still boot up the system).</p>
+
+
+ <h2>
+6.1 Change the signature on /dev/hda to software RAID</h2>
+ <p class="code">
+cfdisk /dev/hda</p>
+ <ul>
+ <li>Select "/dev/hda1" </li>
+ <li>Then select "[Type]" </li>
+ <li>Then hit "enter". </li>
+ <li>Then type "FD". </li>
+ <li>We are setting partition to "Software RAID" </li>
+ <li>Should already be set. </li>
+ <li>Then Select "Boot" if not set, so that you can boot
+off the device. </li>
+ <li>All the boot partitions that are members of your bootable RAID device (hda1 and hdc1) should have the bootable flag set. If one is not set, set it here now</li>
+ <li>Then select "Write" and enter 'yes'. </li>
+ <li>Then select "Quit". </li>
+ </ul>
+ <p>
+My two hard disks are from different manufacturers and as it happens, while both are roughly 40G, they have different architectures in terms of sectors and precise size. So cfdisk was unable to make the partitions precisely the same size and I had hda1 29,997.60MB and hdc1 30,000MB. This didn't work when I got to the 'mdadm --add /dev/md0 /dev/hda1' step. I got a, "failed: no space left on device!" error. So I ran cfdisk again and made hda1 slightly larger than hdc1, since I could not make them both exactly the same size. Now hda1 is 30,005.83MB and the 'mdadm --add /dev/md0 /dev/hda1' step works :-). (The remaining 10,000MB on each disk I am using for other purposes, including a md1 of 1,000MB composed of hda2 and hdc2.)</p>
+
+
+ <h2>
+6.2 Add the first-disk to our existing RAID device</h2>
+ <p>
+And watch the booted RAID system automatically mirror itself onto the new drive. We are currently booted from MBR and /boot device on /dev/hdc1, with /dev/md0 as root (/).</p>
+ <p class="code">
+mdadm --add /dev/md0 /dev/hda1</p>
+ <p>
+Note: We are adding /dev/hda1 into our existing RAID device. See if it is syncing.</p>
+ <p class="code">
+cat /proc/mdstat</p>
+ <p>
+should show that it is syncing.</p>
+
+
+ <h2>
+6.3 Write new /etc/lilo.conf settings</h2>
+ <p>
+these are from when we are booted onto RAID.</p>
+ <p class="code">
+boot=/dev/md0<br />
+root=/dev/md0<br />
+#this writes the boot signatures to either disk.<br />
+raid-extra-boot=/dev/hda,/dev/hdc<br />
+image=/vmlinuz<br />
+label=RAID<br />
+read-only</p>
+ <p>
+YOU NEED THE raid-extra-boot to have it write the boot loader to all the disks.</p>
+ <p>
+YOU ARE OVERWRITING THE BOOT LOADER ON BOTH /dev/hda and /dev/hdc.</p>
+ <p>
+You can keep your old boot option to boot /dev/hda so you can boot RAID and /dev/hda.</p>
+ <p>
+But remember you don't want to boot into a RAID device in non-RAID mode, as it will hurt the synchronization if you make changes on one disk and not the other.</p>
+
+
+ <h2>
+6.4 Run Lilo with -R option and reboot</h2>
+ <p>
+(we are currently booted into RAID)</p>
+ <p class="code">
+lilo -t -v</p>
+ <p class="code">
+lilo -R RAID</p>
+ <p>
+The -R option tells Lilo to use the new Lilo setting only for the next reboot, and then revert back to the previous setting.</p>
+ <p>
+ <div class="note">
+<b>Note 1:</b> Step 6.4 returned an error, "Fatal: Trying to map files from unnamed device 0x0000 (NFS/RAID mirror down ?)."</p>
+ <p>
+So I waited for the synchronization, started in Step 6.2, to finish (checking it with 'cat /proc/mdstat'). Once it was done, did 'lilo -t -v' again. No "Fatal" error; Lilo seems happy now (no "Fatal" message).</p>
+
+<b>Note 1a:</b> The synchronization however took two hours! I checked with 'hdparm' and it seems I have <b>DMA</b> turned off. Perhaps the synchronization would go faster with DMA turned on. Some examination of my system revealed that I did not have my computer's PCI chipset support compiled into my custom kernel. I recompiled the kernel (kernel 2.6.4) and selected the correct PCI chipset support for my computer and now DMA works correctly :-) and by default. For DMA to be default is also configurable in the PCI area of 'make menuconfig' during kernel compile configuration, and I chose it.</p>
+ <p>
+So I can now do Lilo with '-R <boot-parameter-here>' switch and reboot.</p>
+ <p>
+<b>Note 2:</b> another error, "Fatal: No image "RAID" is defined."</p>
+ <p>
+As in Step 5.3 above, I need to do 'lilo' first so that Lilo reads my new /etc/lilo.conf, otherwise Lilo does not know about my stanza labeled "RAID" which is new in my lilo.conf. (Yes I told Lilo about it on hda1 in step 5.3, but that was after I had copied the hda1 root (/) system to here, md0, which branched my system into two separate system configurations. So it needs to be done here, too. Then I can do 'lilo -R RAID'.</p>
+ <p>
+<b>Note 2a:</b> However, the '-R' switch is pointless here unless the lilo.conf stanza labeled "RAID" is *not* the first kernel-image stanza in my lilo.conf. Because if it *is* the first stanza, then it is the default stanza anyway, with or without the '-R'.</p>
+ <p>
+ </div>
+Then</p>
+ <p class="code"><span class="reboot">
+reboot</span></p>
+ <p>
+and check</p>
+ <p class="code">
+cat /proc/mdstat
+ <p>
+and check</p>
+ <p class="code">
+mount</p>
+ <p>
+to be sure all is as expected.</p>
+
+
+ <h2>
+6.5 Now run Lilo normally (without -R) and reboot</h2>
+ <p>
+See what Lilo will do.</p>
+ <p class="code">
+lilo -t -v</p>
+ <p>
+If it looks okay, do it:</p>
+ <p class="code">
+lilo</p>
+ <p class="code"><span class="reboot">
+reboot</span></p>
+ <p>
+and check</p>
+ <p class="code">
+cat /proc/mdstat</p>
+ <p>
+and check</p>
+ <p class="code">
+mount</p>
+ <p>
+as a final system check.</p>
+
+ <h2>
+Done.</h2>
+
+
+
+ <!-- Grub+initrd -->
+
+ <div id="7" class="up" onMouseOver="status='^ up to Table of Contents';" onMouseOut="status=''" onClick="(location.hash == '#TOC')? location.reload(): location.hash = 'TOC'; return false;"><a class="up" href="#TOC">^</a></div>
+
+<h1>Part II. RAID using initrd and grub</h1><font size="-3">- Ferdy Nagy</font>
+<p>I used the following procedure with stock Debian 2.6.5, which has an initrd with all the modules ready to boot
+into RAID. The procedure also covers using grub as the boot loader. I built this from a bare install of Sarge
+using the new installer with grub as the boot loader, but most of this document is distro independent. My file system
+throughout is ext3 and it shouldn't take too much to use reiserfs.</p>
+<p>These steps reference back to the procedure sections outlined above and indicate where things differ due to initrd or
+grub, so you will have to read/do/be familiar with the above steps. Also, make sure you currently use grub
+as your boot loader, if you are using LILO, install grub and make sure it works before proceeding!</p>
+
+
+<h2>Section - 2. Upgrade to a RAID savvy kernel</h2>
+
+<a href="#2">Section 2</a>
+<p>When using initrd the kernel does not need to have the RAID compiled in, they will be loaded as modules. Make sure
+the kernel loads the RAID modules.</p>
+
+<p>Edit <span class="code">/etc/modules</span> and add</p>
+<p class="code">md<br/>
+raid1</p>
+
+
+<h2>Section - 3. Setup RAID 1</h2>
+Follow <a href="#3">section 3</a> to setup the RAID 1.
+
+
+<h2>Section - 4. Copy your Debian system</h2>
+Follow <a href="#4">section 4</a> to copy the debian system.
+
+
+<h2>Section - 5. Reboot to RAID device</h2>
+
+<p>Instead of section 5 using LILO, grub is used as the boot loader, and initrd used to load the kernel. A new kernel
+entry in the grub menu is created that refers to an initrd that is created which will start the md [raid] device.
+The original kernel entry will remain and can be reverted to if something goes wrong until RAID is running. This will
+still use grub loaded installed on the /dev/hda MBR.</p>
+
+<h3>5.1 Build a new RAID initrd</h3>
+
+<p>A) Make sure the initrd has the modules it needs, by editing <span class="code">/etc/mkinitrd/modules</span>. Add the
+following [you can see what modules are available by mounting the initrd and looking in the lib/modules - see section 8.]:</p>
+<p class="code">
+ md<br/>
+ raid1</p>
+
+<p>B) Update the initrd so that the root device loaded is the raid device, not probed. Edit the <span class="code">/etc/mkinitrd/mkinitrd.conf</span>, and update the ROOT line<br/>
+<span class="code">ROOT=/dev/md0</span></p>
+
+<p>C) Create the new initrd and a link to it.</p>
+<p class="code">mkinitrd -o /boot/initrd.img-2.6.5-raid</p>
+
+
+<h3>5.2 Update the grub boot menu</h3>
+
+<p>edit <span class="code">/boot/grub/menu.lst</span> </p>
+<p>1. Add the following entry</p>
+<p class="code">
+<pre>
+title Debian GNU/Linux, kernel 2.6.5-1-686 RAID
+root (hd0,0)
+kernel /boot/vmlinuz-2.6.5-1-686 root=/dev/md0 ro
+initrd /boot/initrd.img-2.6.5-1-686-raid
+savedefault
+boot
+</pre>
+</p>
+<p>2. Update the following kernel root option in the file. <b>Note:</b> see the grub known issues (section 8), so
+this option will not be used anyway.</p>
+<p class="code"># kopt=root=/dev/md0 ro<br/>
+
+
+<h3>5.3 Do the above 5.4 Edit /mnt/md0/etc/fstab and reboot</h3>
+
+<p>[Copied from Part I 5.4 above]</p>
+<p>to have /dev/md0 mount as root (/), when grub boots from our RAID device, /dev/md0:</p>
+<p>Previous root (/) in fstab was:</p>
+<p class="code">/dev/hda1 / ext3 defaults 0 0</p>
+<p>Edit it to:</p>
+<p class="code">/dev/md0 / ext3 defaults 0 0</p>
+<p>Note: edit /mnt/md0/etc/fstab, not /etc/fstab, because at the moment we are booted with hda1 as root (/) but we
+want to change the /etc/fstab that we currently have mounted on /mnt/md0/etc/fstab, our RAID device.</p>
+<p>Reboot and choose the RAID kernel to check if system boots our RAID device, /dev/md0, as root (/). If it does not, just reboot again and choose the
+original pre-RAID kernel image.</p>
+<p class="code"><span class="reboot">reboot</span></p>
+<p>Verify /dev/md0 is mounted as root (/)</p>
+<p class="code">mount</p>
+<p>should show something similar to:</p>
+
+<p class="code">/dev/md0 on / type ext3 (rw)<br />
+proc on /proc type proc (rw)<br />
+devpts on /dev/pts type devpts (rw,gid=5,mode=620)</p>
+
+<p>Now we are booted into the new RAID device -- md0 as root (/). Our RAID device only has one disk in it at the
+moment because we earlier declared the other disk as 'missing'. That was because we needed that other disk, hda, to
+install Debian on or because it was our pre-existing Debian system.</p>
+
+<span class="code">cat /proc/mdstat</span> shows the [degraded] array is up and running, note the [_U] - second disk is up.</p>
+
+
+<h2>Section - 6. Reformat hda as fd and declare it as disk-one of your raid</h2>
+
+<h3>6.1/2 Setup hda and add to array</h3>
+<p>Follow <a href="#6">steps 6.1, and 6.2</a>. <b>Wait</b> and make sure the drives are fully synced before proceeding.
+
+<h3>6.3 re-run mkinitrd again, and reboot.</h3>
+
+<p>This is needed to make sure that mkinitrd starts the newly built array with all drives. mkinitrd uses mdadm -D to
+discover what drives to assemble in the array during startup, this is contained in a script in the initrd image. If this
+step is not done the next time you reboot the array will be degraded.</p>
+
+<p> Do the following</p>
+<p class="code">mkinitrd -o /boot/initrd-2.6.5-raid.img</p>
+
+<p><span class="reboot">reboot</span></p>
+
+<p>and check the array is fully up, look for the [UU]</p>
+
+<p class="code">cat /proc/mdstat</p>
+
+<p>and check /dev/md0 is mounted</p>
+
+<p class="code">mount</p>
+
+
+<h2>7. Put grub into the MBR of the second disk</h2>
+
+<p>grub refers to the boot(ed) device as hd0, so if the primary hard drive (/dev/hda) fails the system will look for
+the next bootable device (/dev/hdc) and loads its MBR, which grub will still refer to as hd0. So, the grub
+configuration can still use hd0 even when the primary device fails.</p>
+
+<h3>7.1 Put grub into the MBR</h3>
+
+<p>These steps temporarily tell grub the second device is hd0 and then loads the MBR.</p>
+
+<p>start the grub command line, then run the load commands. <b>Note:</b> grub partition references
+are offset by 1, so in the following with a partition of /dev/hdc1, the root is (hd0,0) [previous line tells
+grub to set hdc as hd0]. If the partition was /dev/hdc2, the root would be (hd0,1)!</p>
+<p class="code">grub<br/>
+grub> device (hd0) /dev/hdc<br/>
+grub> root (hd0,0)<br/>
+grub> setup (hd0)<br/>
+</p>
+
+<h3>7.2 Testing</h3>
+
+<p>reboot, verify the /proc/mdstat devices always start. Follow <a href="#VIII">section VIII</a> and verify the
+system boots with one disk off line.</p>
+
+
+<h2>8. Known Issues</h2>
+
+<h3>grub</h3>
+<p>grub will already be installed on hda, and you will manually force grub to be installed on hdc so the MBRs are
+ok; however, <span class="code">install-grub</span> and <span class="code">update-grub</span> will fail because
+grub does not understand the md0 device. This is not a problem with install-grub as it will not be executed again
+after it has been installed, but update-grub is executed after an updated kernel is apt'd, causing an error to be
+reported by apt. The update-grub error is ok, the kernel gets installed and the initrd is created with all
+the md array information, provided the array was not degraded during the kernel upgrade. <b>But</b> you will have
+to <b>manually</b> update the grub menu.lst and add the new kernel information before you reboot, or the new
+kernel will not appear in the grub menu.</p>
+
+<h3>mkinitrd</h3>
+<p>When using mdadm, mkinitrd will only detect disks in the array that are running at the time of execution. You should
+not install a new kernel while the array is degraded, otherwise, even if you do an mdadm --add, the next reboot will
+still be degraded! The array is started at boot time by <span class="code">script</span>. You can see what
+is in the script of the initrd by mounting it, e.g.</p>
+<p class="code">mount /boot/initrd.img-<b>X.X.X</b> /mnt -o loop<br/>
+cat /mnt/script</p>
+<p>And look for the array start line similar to</p>
+<p class="code">mdadm -A /devfs/md/0 -R -u 23d8dd00:bc834589:0dab55b1:7bfcc1ec /dev/hda1 /dev/hdc1</p>
+
+ <!-- Appendix -->
+
+ <div id="I" class="up" onMouseOver="status='^ up to Table of Contents';" onMouseOut="status=''" onClick="(location.hash == '#TOC')? location.reload(): location.hash = 'TOC'; return false;"><a class="up" href="#TOC">
+^</a></div>
+ <h1>
+Appendix</h1>
+
+
+ <h2>
+I. RAID 1 Introduction</h2>
+ <p>
+Redundant Array of Inexpensive Disks (RAID) refers to putting more than one hard disk to work together in various advantageous ways. Hardware RAID relies on special hardware controllers to do this and is not covered in this HowTo. Software RAID, the subject of this HowTo, uses software plus the ordinary controllers on your computer's motherboard and works excellently.</p>
+ <p>
+RAID 1 is where you use two hard drives as if they were one by mirroring them onto each other. Advantages of RAID 1 are (a) faster data reads because one part of the data can be read from one of the disks while simultaneously another part of the data is read from the other disk, and (b) a measure of fail over stability -- if one of the disks in the RAID 1 fails, the system will usually stay online using the remaining drive while you find time to replace the failed drive.</p>
+ <p>
+To achieve the speed gain, the two disks that comprise your RAID 1 device must be on separate controllers (in other words, on separate drive cables). The first part of the data is read from one disk while simultaneously the second part of data is read from the other disk. Writing data to a RAID 1 device takes twice as long apparently. However, under most system use data is more often read from disk than written to disk. So RAID 1 almost doubles the effective speed of your drives. Nice.</p>
+ <p>
+RAID is not a substitute for regular data back ups. Many things can happen that destroy both your drives at the same time.</p>
+
+
+ <div id="II" class="up" onMouseOver="status='^ up to Table of Contents';" onMouseOut="status=''" onClick="(location.hash == '#TOC')? location.reload(): location.hash = 'TOC'; return false;"><a class="up" href="#TOC">
+^</a></div>
+ <h2>
+II. Drive designators (hda, hdb, hdc, hdd), jumpers and cables</h2>
+ <p>
+<b>Drive designators.</b></p>
+ <p>
+Drives on IDE 1 -- Primary Controller</p>
+ <ul>
+ <li>
+hda, Primary Master drive</li>
+ <li>
+hdb, Primary Slave drive</li>
+ </ul>
+ <p>
+Drives on IDE 2 -- Secondary Controller</p>
+ <ul>
+ <li>
+hdc, Secondary Master drive</li>
+ <li>
+hdd, Secondary Slave drive</li>
+ </ul>
+ <p><b>
+Jumpers.</b> When moving drives around in your computer, be sure to set the jumpers on your drives correctly. They are the little clips that connect two of various pins on your drive to set it to Cable Select, Master, or Slave. IDE drives usually have a diagram right on their case that shows where to set the clip for what setting. Different brands sometimes use different pin configurations.
+ <p><b>
+Cables.</b> Use 80 wire 40 pin IDE drive cables, not 40 wire 40 pin or you will slow down your hard drive access. For best results, cables should be no longer than the standard 18". If your cable has a blue end, that's the end to attach to the mother board (I don't know why). I don't think it matters which of the two drive connectors on the cable you plug your drive into, the middle or end one, unless you use Cable Select in which case I believe the cable's end plug is Master and its middle plug is Slave.</p>
+
+
+ <div id="III" class="up" onMouseOver="status='^ up to Table of Contents';" onMouseOut="status=''" onClick="(location.hash == '#TOC')? location.reload(): location.hash = 'TOC'; return false;"><a class="up" href="#TOC">
+^</a></div>
+ <h2>
+III. Setting up software RAID for multiple partitions.</h2>
+ <p>
+You can have a multi-partition RAID system if you prefer. You just need to create multiple RAID devices.</p>
+ <p>
+I have found it useful when setting software RAID on multiple partitions to set the RAID device to the same name as the disk partition.</p>
+ <p>
+If you have 3 partitions on /dev/hda and I want to add /dev/hdc for software RAID, then boot /dev/hdc and add /dev/hda back into the device, exactly what I did earlier, but with 3 partitions which are: hda1=/boot, hda2=/, hda3=/var</p>
+ <p class="code">
+sfdisk -d /dev/hda | sfdisk /dev/hdc;<br />
+reboot<br />
+mdadm --zero-superblock /dev/hda1<br />
+mdadm --zero-superblock /dev/hda2<br />
+mdadm --zero-superblock /dev/hda3<br />
+mdadm --create /dev/md1 --level=1 --raid-disks=2 missing /dev/hdc1<br />
+mdadm --create /dev/md2 --level=1 --raid-disks=2 missing /dev/hdc2<br />
+mdadm --create /dev/md3 --level=1 --raid-disks=2 missing /dev/hdc3<br />
+mkfs.reiserfs /dev/md1;mkfs.reiserfs /dev/md2; mkfs /dev/md3;<br />
+mkdir /mnt/md1 /mnt/md2 /mnt/md3;<br />
+cp -ax /boot /mnt/md1;cp -ax / /mnt/md2; cp -ax /var /mnt/md3;</p>
+ <p>
+add entry in current fstab for all 3 and REBOOT.</p>
+ <p>
+Sync data again, only copying changed stuff.
+ <p class="code">
+cp -aux /boot /mnt/md1;cp -aux / /mnt/md2; cp -aux /var /mnt/md3;</p>
+ <p>
+edit lilo.conf entry in this case:
+ <p class="code">
+boot=/dev/md1<br />
+root=/dev/md2</p>
+ <p>
+Edit /mnt/md2/etc/fstab to have / set to /dev/md2.</p>
+ <p>
+REBOOT into RAID.</p>
+ <p>
+Add devices in:
+ <p class="code">
+mdadm --add /dev/md1 /dev/hda1<br />
+mdadm --add /dev/md2 /dev/hda2</p>
+ <p>
+Wait for sync, write Lilo permanently, and REBOOT into your setup.</p>
+ <p>
+It is not harder to include more devices in a software RAID device.</p>
+
+
+ <div id="IV" class="up" onMouseOver="status='^ up to Table of Contents';" onMouseOut="status=''" onClick="(location.hash == '#TOC')? location.reload(): location.hash = 'TOC'; return false;"><a class="up" href="#TOC">
+^</a></div>
+ <h2>
+IV. Lilo</h2>
+ <p>
+You need special entries to use Lilo as your boot loader, I couldn't get grub to work, but nothing prevents you from using grub. Just standard Lilo/grub entries WILL NOT WORK FOR RAID.</p>
+ <p>
+Entries in /etc/lilo.conf:
+ <p class="code">
+raid-extra-boot=&lt;option&gt;</p>
+ <p>
+That option only has meaning for RAID 1 installations. The &lt;option&gt; may be specified as none, auto, mbr-only, or a comma-separated list of devices; e.g., "/dev/hda,/dev/hdc6".</p>
+ <p><span class="code">
+panic='' </span>line in lilo.conf tells Lilo to automatically boot back to the old install if something goes wrong with the new Kernel.</p>
+
+
+ <div id="V" class="up" onMouseOver="status='^ up to Table of Contents';" onMouseOut="status=''" onClick="(location.hash == '#TOC')? location.reload(): location.hash = 'TOC'; return false;"><a class="up" href="#TOC">
+^</a></div>
+ <h2>
+V. Copying data</h2>
+ <p>
+Use "cp -aux" to just copy updated items. if you are copying a partition that is not root you need to copy the subdirectories and not the mount point, otherwise it will just copy the directory over. To copy boot which is a separately mounted partition to /mnt/md1 which is our new software RAID partition we copy as thus: "cp -aux /boot/* /mnt/md1" NOTE THE DIFFERENCE when copying mount points and not just /. If you just do cp -aux /boot /mnt/md1 it will just copy over boot as a subdirectory of /mnt/md1.</p>
+ <p>
+Or, alternatively, you could copy the root system with 'find' piped to 'cpio', like this:</p>
+ <p><p class="code">
+cd /<br />
+find . -xdev -print | cpio -dvpm /mnt/md0</p>
+
+
+
+ <div id="VI" class="up" onMouseOver="status='^ up to Table of Contents';" onMouseOut="status=''" onClick="(location.hash == '#TOC')? location.reload(): location.hash = 'TOC'; return false;"><a class="up" href="#TOC">
+^</a></div>
+ <h2>
+VI. Rebooting</h2>
+ <p>
+You should always reboot if you have changed your partitions, otherwise the Kernel will not see the new partitions correctly. I have changed partitions and not rebooted, and it caused problems. I would rather have the simpler longer less potentially troublesome approach. Just because it appears to work, does not mean it does work. You really only need to reboot if you are CHANGING or rebooting a new Lilo configuration. Don't email me if you hose yourself because you did not feel the urge to reboot. Trust me.</p>
+
+
+ <div id="VII" class="up" onMouseOver="status='^ up to Table of Contents';" onMouseOut="status=''" onClick="(location.hash == '#TOC')? location.reload(): location.hash = 'TOC'; return false;"><a class="up" href="#TOC">
+^</a></div>
+ <h2>
+VII. initrd</h2>
+ <p>
+initrd: Use RAID as initrd modules. </p>
+ <p>
+The Kernel that is installed when you first build a system does not use an initrd.img.
+However the default Debian kernel uses an initrd. So you can use a stock kernel with
+software RAID.</p>
+ <p>
+The new Kernel by default won't contain the right modules for creating a RAID savvy initrd, but they can be added.</p>
+ <p>
+&nbsp;<p>
+(Per James Bromberger)<p>
+Now we need to prepare for running a RAID setup. Our packages need an update.
+Use apt, because it rocks, and install the following: <br>
+<p class="code">
+DevFSd <br>
+kernel-image-2.4.x (whatever suits you) <br>
+reiserfsprogs <br>
+less <br>
+screen <br>
+vim <br>
+<p>
+...Anything else you need and can't live without for the next 10 minutes <br>
+<br>
+You might already have some of these modules in the kernel, eg ext2.
+Edit /etc/modules and add the following modules: <br>
+<p class="code">
+reiserfs <br>
+md <br>
+raid1 <br>
+ext2 <br>
+ide-disk (might not need this one.)<br>
+raid5 <br>
+ext3 <br>
+ide-probe-mod (might not need this one.)<br>
+ide-mod (might not need this one.) <br>
+<p>
+<br>
+Edit /etc/mkinitrd/modules, and add the same modules to this list. Your initrd
+image needs to be able to read and write to your RAID array, before your
+filesystem is mounted. Initrd is the trick here. You probably also want to see
+if you need to edit /etc/mkinitrd/mkinitrd.cfg and set the variable ROOT=probe
+to be ROOT=/dev/md0, or possibly, if using DevFS, ROOT=/dev/md/0. <br>
+<br>
+Regenerate your initrd image for your new kernel with
+<p class="code">
+
+mkinitrd -o /tmp/initrd-new /lib/modules/2.4.x-... .
+
+<p> If all is good, move this to /boot/initrd-2.4.x-... and
+edit your /etc/lilo.conf to add initrd=/boot/initrd against the &quot;Linux&quot; kernel
+entry. Run lilo, and you should see an asterisk next to the boot image &quot;Linux&quot;.<p>
+With those modules you should be able to install the new kernel-image package. The install will add those modules to the initrd.img. Now you can do for example (I actually only tested with kernel-image-2.4.24-1-686-smp on a machine using testing and unstable listed in the /etc/apt/source.list)
+ <p class="code">
+apt-get install kernel-image-2.4.24-1-686-smp</p>
+ <p>
+You will need to modify /etc/lilo.conf to include the right stuff. Otherwise the post install scripts for the package will likely fail.
+ <p class="code">
+image=/vmlinuz<br />
+label=Linux<br />
+initrd=/initrd.img</p>
+ <p>
+(The above is all one line)</p>
+ <p>
+Run Lilo and REBOOT.</p>
+ <p>
+You should now have the modules loaded. Check with:<span class="code"> cat /proc/mdstat </span></p>
+
+
+ <div id="VIII" class="up" onMouseOver="status='^ up to Table of Contents';" onMouseOut="status=''" onClick="(location.hash == '#TOC')? location.reload(): location.hash = 'TOC'; return false;"><a class="up" href="#TOC">
+^</a></div>
+ <h2>
+VIII. Verify that system will boot even with one disk off-line</h2>
+ <p>
+Roger did it this way.</p>
+ <ol>
+ <li>Shutdown and power-off your computer.</li>
+ <li>Open up computer and unplug the power to Primary Master disk (/dev/hda).</li>
+ <li>Start up your computer. It should boot up from the other disk.</li>
+ <li>Now look at<br />
+ <span class="code">cat /proc/mdstat</span><br />
+ you should see that one of the disks in your md0 has "failed".</li>
+ <li>Shutdown and then unplug the power to you computer, again.</li>
+ <li>Reconnect the power to Primary Master disk.</li>
+ <li>Start up your computer, again. It should boot up from the other disk still. It wont try to access the disk that it now has on record as "failed" until you re-add it to your RAID. Look again at<br />
+ <span class="code">cat /proc/mdstat</span><br />
+ you should still see one of the disks in your md0 listed as "failed". If this were not a simulation it probably would be failed and you would want to replace it with a new one. But for the simulation we just un-plug and later re-plug the power connector to the disk.</li>
+ <li>Now that you have re-connected the power to the disk (or replaced it with a new one were it really was a failed disk) bring it back online with mdadm,<br />
+ <span class="code">mdadm --add /dev/md0 /dev/hda1</span><br />
+ and check its status with,
+ <span class="code">cat /proc/mdstat</span><br />
+ you should see that it is being synchronized with the other disk in your RAID 1.</li>
+ <li><b>WAIT until the synchronization has completed.</b> Then you can try the above again but unplugging the other disk in your RAID 1. <b>WARNING</b> if you do not wait for synchronization to fully complete (check with '/proc/mdstat') you will have a <b>real problem</b> because your system is only partially rebuilt on the "new" disk until synchronization has finished.</li>
+ </ol>
+ <p class="note">
+NB: I (Roger) had to disconnect power to my CD-ROM drive (because my CD-ROM was on /dev/hdd -- Secondary Slave) in order to boot with my Secondary Master disconnected. Otherwise my BIOS refused to boot the machine because my CD-ROM was then a Slave on a cable without any Master. Your mileage may vary. :-) So I decided to leave my CD-ROM disconnected, as this is a server and I need it to boot even with a failed drive more than I need the convenience of keeping the CD-ROM connected. I can of course connect the CD-ROM when I need it as long as I have a working Master drive on its cable with it or set it to Master.</p>
+
+
+ <div id="IX" class="up" onMouseOver="status='^ up to Table of Contents';" onMouseOut="status=''" onClick="(location.hash == '#TOC')? location.reload(): location.hash = 'TOC'; return false;"><a class="up" href="#TOC">
+^</a></div>
+ <h2>
+IX. Setting up a RAID 1 Swap device</h2>
+ <p>
+I created a swap RAID device as follows:</p>
+ <p>
+(I have a 1000MB hda2 and a 1000MB hdc2, both as type 'fd' created with 'cfdisk', that I will use as md1 for swap.)</p>
+ <p>
+ (Or you can just create the swap partitions on the actual disk, don't put swap on raid.
+ Just put a swap partition on each disk in your raid set on an empty partition.)
+
+ <p class="code">
+
+ <p>
+Add a Swap entry in /etc/fstab, just after root (/) partition line. Example line to add to /etc/fstab:
+ <p class="code">
+/dev/md1 none swap sw 0 0</P>
+ <p>
+Reboot and the boot sequence should start up the Swap when it reads /etc/fstab.</p>
+ <p class="code"><span class="reboot">
+reboot</span></p>
+ <p class="todo">
+ You can argue whether swap should be on raid. A large colo admin mentions that he does not use swap on raid. Keep it as simple as possible. You decide.</p>
+
+
+ <div id="X" class="up" onMouseOver="status='^ up to Table of Contents';" onMouseOut="status=''" onClick="(location.hash == '#TOC')? location.reload(): location.hash = 'TOC'; return false;"><a class="up" href="#TOC">
+^</a></div>
+ <h2>
+X. Performance Optimizations </h2>
+ For every ide drive turn on hdparm.
+ <br>
+ <p class="code">&nbsp;<p class="code">hdparm -d1 -c3 /dev/hda /dev/hdc<p>
+ <br>You need to use bonnie++ to measure software raid performance
+ <br>You want all your devices to be masters, as you're limited to the total bandwidth on that chain of
+ <br>hard drives.
+ <br> I just stick as many hard drives in the system as possible,
+ <br> I have not encountered problems where having disks on the same master
+ <br> slave channel caused a slowdown.
+
+ <div id="XI" class="up" onMouseOver="status='^ up to Table of Contents';" onMouseOut="status=''" onClick="(location.hash == '#TOC')? location.reload(): location.hash = 'TOC'; return false;"><a class="up" href="#TOC">
+^</a></div>
+ <h2>
+XI. Disaster Recovery
+ </h2>
+
+ <h2>
+ <p class="todo">
+(These directions are untested, I need to adapt them to mdadm instead of raid2 --luke)</p></h2>
+
+ <P>So what to do if you can't get your root RAID1 filesystem to boot? Here is a
+straightforward way to get to your md0:</P>
+<UL>
+<br>Find the 2.4 kernel install media from $DEBIAN/dists/unstable/main/disks-i386,
+<br> and download the <STRONG>bf2.4</STRONG> set of disks.
+<br> You only need the rescue and root images.
+<br>Find the corresponding kernel-image-2.4.18-bf2.4_2.4.18-4_i386.deb or
+<br>similar; and unpack this somewhere with
+<p class="code"> &nbsp;</p>
+<p class="code"> dpkg-deb -x kernel-image-2.4.yy-bf45.deb temp/ </p>
+<br>In the temp directory, find the md.o and raid1.o modules.
+<br>Copy them to a new floppy in /floppy/boot.
+<br>Copy /sbin/raid* to the root of the floppy disk (/floppy). You'll notice
+<br>that all the raid programs are symlinks to the same binary; doesn't matter,
+<br>since you probably have a vfat disk that doesn't know about symlinks. Just make
+<br>multiple copies. (Or be smart here and use an ext2 disk).
+<br>Boot with the rescue, then with the root disk
+<br>After choosing a language and keyboard from the installer, choose to preload
+<br>some modules. Grab that third disk you just put those modules and binaries on,
+<br>and put it in the floppy drive.
+<br>Load up md.o first, and then raid1.o.
+<br>Press Alt-F2 to get a text console.
+<p class="code">
+<br>mount /floppy
+<br>cp /etc/raid* /sbin
+<p class="code">
+# (Ie: copy to the ramfs /sbin)
+<br>mkdir /etc/raid<p class="code">
+cp /floppy/raidtab /etc/raid
+<p class="code">
+ln -s /etc/raid/raidtab /etc/raidtab
+<p class="code">
+raidstart /dev/md0<p class="code">
+mount -t reiserfs /dev/md0 /target </UL>
+</p>
+
+ <div id="XII" class="up" onMouseOver="status='^ up to Table of Contents';" onMouseOut="status=''" onClick="(location.hash == '#TOC')? location.reload(): location.hash = 'TOC'; return false;"><a class="up" href="#TOC">
+^</a></div>
+ <h2>
+XII. Quick Reference</h2>
+ <p><b>
+DON'T JUST LOOK AT THIS QUICK REFERENCE. Understand the rest of the document.</b></p>
+ <h3 style="font-style: italic; margin: 3% 1% 2% -2%;">
+Quick Reference -- setting up bootable system on /dev/md0 using /dev/hda and /dev/hdc as RAID 1 component disks</h3>
+ <p>
+Verify RAID savvy Kernel. (1) You should see the RAID "personalities" your Kernel supports:</p>
+ <p class="code">
+cat /proc/mdstat</p>
+ <p class="code">
+ dmesg | grep -i raid
+ <p>
+(This will show you if raid is compiled into kernel, or detected as a module from initrd.)
+/etc/modules will not list RAID if Kernel has RAID compiled in instead of loaded as modules.
+Use lsmod to list currently loaded modules this will show raid modules loaded.
+ <p>
+(2) You should NOT see any RAID modules in /etc/modules (If you do, review step 2 of Procedure):</p>
+ <p class="code">
+cat /etc/modules</p>
+ <p>
+Copy partitions hda to hdc:
+ <p class="code">
+sfdisk -d /dev/hda | sfdisk /dev/hdc </p>
+ <p>
+Create array:
+ <p class="code">
+mdadm --create /dev/md0 --level=1 --raid-disks=2 missing /dev/hdc1 </p>
+ <p>
+Copy data:
+ <p class="code">
+cp -ax / /mnt/md0 </p>
+ <p>
+Example /etc/lilo.conf entry for 1 disk RAID device:
+ <p class="code">
+boot=/dev/hda<br />
+image=/vmlinuz<br />
+label=RAID<br />
+read-only<br />
+#our new root partition.<br />
+root=/dev/md0</p>
+ <p>
+Add second disk to array:
+ <p class="code">
+mdadm --add /dev/md0 /dev/hdc1 </p>
+ <p>
+Example final /etc/lilo.conf entry:
+ <p class="code">
+boot=/dev/md0<br />
+root=/dev/md0<br />
+#this writes the boot signatures to either disk.<br />
+raid-extra-boot=/dev/hda,/dev/hdc<br />
+image=/vmlinuz<br />
+label=RAID<br />
+read-only</p>
+
+
+ <h3 style="font-style: italic; margin: 3% 1% 2% -2%;">
+Useful 'mdadm' commands</h3>
+ <p>
+Always zero the superblock of a device before adding it to a RAID device. Why? Because the disks decide what array they are in based on the disk-id information written on them. Zero the superblock first in case the disk was part of a previous RAID device. Also, if a partition was part of a previous RAID device, it appears to store the size of its previous partition in the signature. Zeroing the superblock before adding it to a new RAID device takes care of cleaning up that, too.</p>
+ <p>
+Erase the MD superblock from a device:
+ <p class="code">
+mdadm --zero-superblock /dev/hdx</p>
+ <p>
+Remove disk from array:</p>
+ <p class="code">
+mdadm --set-faulty /dev/md1 /dev/hda1 <br />
+mdadm --remove /dev/md1 /dev/hda1</p>
+ <p>
+Replace failed disk or add disk to array:
+ <p class="code">
+mdadm --add /dev/md1 /dev/hda1</p>
+ <p>
+(that will add the disk to the array and resynchronise the data from the existing disk onto the new disk.)</p>
+ <p>
+Create mdadm config file:
+ <p class="code">
+echo "DEVICE /dev/hda /dev/hdc" &gt; /etc/mdadm/mdadm.conf<br />
+mdadm --brief --detail --verbose /dev/md0 &gt;&gt; /etc/mdadm/mdadm.conf<br />
+mdadm --brief --detail --verbose /dev/md1 &gt;&gt; /etc/mdadm/mdadm.conf</p>
+ <p>
+To stop the array completely:
+ <p class="code">
+mdadm -S /dev/md0</p>
+
+
+ <div id="XIII" class="up" onMouseOver="status='^ up to Table of Contents';" onMouseOut="status=''" onClick="(location.hash == '#TOC')? location.reload(): location.hash = 'TOC'; return false;"><a class="up" href="#TOC">
+^</a></div>
+ <h2>
+XIII. Troubleshooting </h2>
+ <br>The main problems people encounter are:</br>
+ <br>Kernel must have support for raid compiled in or loaded correctly in initrd.</br>
+ <br>You will actually have 2 configurations of raid. You boot to the failed raid volume,</br>
+ <br>then add in the original disk, then boot the final raid configuration.</br>
+
+ <br>Performance is too slow:</br>
+ <br>See <a href="#X"> Performance Optimizations</a>
+
+ <div id="XIIII" class="up" onMouseOver="status='^ up to Table of Contents';" onMouseOut="status=''" onClick="(location.hash == '#TOC')? location.reload(): location.hash = 'TOC'; return false;"><a class="up" href="#TOC">
+^</a></div>
+ <h2>
+XIV. Raid Disk Maintenance. </h2>
+<br>You need to configure raid to monitor for errors. </br>
+<br>It will email you when it detects an error </br>
+<br>Once a failed disk is detected, remove it and then add it back in.</br>
+<br>Create an mdadm.conf file <br>
+<br>See <a href="#XII"> mdadm commands</a>
+<br>You can also configure hot spare, that will come online if a disk fails. </br>
+<br>Configure smart monitoring of disk diagnostics to detect pre-failing disks </br>
+<p class="todo">
+ Finish directions on smart monitoring and mdadm configuration to monitor disks,and hot spares.
+<p>
+
+ <!-- References -->
+
+ <div id="references" class="up" onMouseOver="status='^ up to Table of Contents';" onMouseOut="status=''" onClick="(location.hash == '#TOC')? location.reload(): location.hash = 'TOC'; return false;"><a class="up" href="#TOC">
+^</a></div>
+ <h1>
+References</h1>
+ <p>
+RAID 1 Root HowTo PA-RISC<br /><a href="http://www.parisc-linux.org/faq/raidboot-howto.html" target="_blank">
+http://www.parisc-linux.org/faq/raidboot-howto.html</a></p>
+ <p>
+Lilo RAID Configuration:<br /><a href="http://lists.debian.org/debian-user/2003/debian-user-200309/msg04821.html" target="_blank">
+http://lists.debian.org/debian-user/2003/debian-user-200309/msg04821.html</a></p>
+ <p>
+Grub RAID Howto<br /><a href="http://www.linuxsa.org.au/mailing-list/2003-07/1270.html" target="_blank">
+http://www.linuxsa.org.au/mailing-list/2003-07/1270.html</a></p>
+ <p>
+Building a Software RAID System in Slackware 8.0<br /><a href="http://slacksite.com/slackware/raid.html" target="_blank">
+http://slacksite.com/slackware/raid.html</a></p>
+ <p>
+Root-on-LVM-on-RAID HowTo<br /><a href="http://www.midhgard.it/docs/lvm/html/install.disks.html" target="_blank">
+http://www.midhgard.it/docs/lvm/html/install.disks.html</a></p>
+ <p>
+Software RAID HowTo<br /><a href="http://unthought.net/Software-RAID.HOWTO/Software-RAID.HOWTO.txt" target="_blank">
+http://unthought.net/Software-RAID.HOWTO/Software-RAID.HOWTO.txt</a></p>
+ <p>
+HowTo - Install Debian Onto a Remote Linux System<br /><a href="http://trilldev.sourceforge.net/files/remotedeb.html" target="_blank">
+http://trilldev.sourceforge.net/files/remotedeb.html</a></p>
+ <p>
+Kernel Compilation Information and good getting started info for Debian<br /><a href="http://newbiedoc.sourceforge.net/" target="_blank">
+http://newbiedoc.sourceforge.net</a></p>
+ <p>
+Initrd information and Raid Disaster Recovery, </p>
+ <p>
+<a href="http://www.james.rcpt.to/programs/debian/raid1/">
+http://www.james.rcpt.to/programs/debian/raid1/</a></p>
+
+ <div id="bottom" class="up" onMouseOver="status='^ up to Table of Contents';" onMouseOut="status=''" onClick="(location.hash == '#TOC')? location.reload(): location.hash = 'TOC'; return false;"><a class="up" href="#TOC">
+^</a></div>
+
+
+</body>
+</html>
diff --git a/debian/rules b/debian/rules
new file mode 100755
index 00000000..79324a15
--- /dev/null
+++ b/debian/rules
@@ -0,0 +1,103 @@
+#!/usr/bin/make -f
+# Copyright (C) 2001-2005 Mario Jou/3en <joussen@debian.org>
+# Copyright (C) 2005-2006 Martin F. Krafft <madduck@debian.org>
+# Distributable under the terms of the GNU GPL version 2.
+
+#export DH_VERBOSE=1
+
+CXFLAGS = -ggdb
+CXFLAGS_UDEB := $(CXFLAGS) -fomit-frame-pointer
+ifneq (,$(findstring noopt,$(DEB_BUILD_OPTIONS)))
+  CXFLAGS += -O0
+  CXFLAGS_UDEB += -O0
+else
+  CXFLAGS += -O2
+  CXFLAGS_UDEB += -Os
+endif
+
+include /usr/share/dpatch/dpatch.make
+
+.PHONY: configure
+configure:
	dh_testdir
	$(MAKE) -sC debian/patches
+
+build: configure patch build-stamp
+build-stamp:
	dh_testdir
	$(MAKE) -f debian/rules mdadm.udeb DEBIAN=yes
	$(MAKE) clean
	$(MAKE) -f debian/rules mdadm DEBIAN=yes
	touch $@
+
+mdadm.udeb: FLAGS = CXFLAGS="$(CXFLAGS_UDEB)" CONFFILE=/tmp/mdadm.conf
+mdadm.udeb: configure
	dh_testdir
	$(MAKE) $(FLAGS) all
	mv mdadm mdadm.udeb
+.PHONY: mdadm.udeb
+
+mdadm: FLAGS = CXFLAGS="$(CXFLAGS)" CONFFILE=/etc/mdadm/mdadm.conf CONFFILE2=/etc/mdadm.conf
+mdadm: configure
	dh_testdir
	$(MAKE) $(FLAGS) all
+.PHONY: mdadm
+
+clean: clean-patched unpatch
+clean-patched:
	dh_testdir
	dh_testroot
	rm -f build-stamp
	-$(MAKE) clean
	rm -f mdadm.udeb mdadm
	dh_clean
	debconf-updatepo
+
+install: DESTDIR=$(CURDIR)/debian/mdadm
+install: DESTDIR_UDEB=$(DESTDIR)-udeb
+install: build
	dh_testdir
	dh_testroot
	dh_clean -k
	dh_installdirs
	install -m0755 mdadm $(DESTDIR)/sbin
	install -m0755 debian/mdrun $(DESTDIR)/sbin
+
	install -m0755 mdadm.udeb $(DESTDIR_UDEB)/sbin/mdadm
+
	install -m0755 debian/initramfs/hook \
		$(DESTDIR)/usr/share/initramfs-tools/hooks/mdadm
	install -m0755 debian/initramfs/script.local-top \
		$(DESTDIR)/usr/share/initramfs-tools/scripts/local-top/mdadm
+
	install -m0755 debian/mkconf $(DESTDIR)/usr/share/mdadm
	install -m0755 debian/checkarray $(DESTDIR)/usr/share/mdadm
	install -m0755 debian/bugscript $(DESTDIR)/usr/share/bug/mdadm/script
+
	install -m0644 debian/mdadm.lintian-overrides \
		$(DESTDIR)/usr/share/lintian/overrides/mdadm
+
+binary-indep: build install
+
+binary-arch: build install
	dh_testdir
	dh_testroot
	dh_installdebconf
	dh_installdocs debian/README.experimental
	dh_installexamples
	dh_installinit --init-script=mdadm-raid --no-start -- start 25 S . start 50 0 6 .
	dh_installinit -- defaults 25
	dh_installman
	dh_installcron
	dh_installchangelogs ChangeLog
	dh_strip
	dh_compress
	dh_fixperms
	dh_installdeb
	dh_shlibdeps
	dh_gencontrol
	dh_md5sums
	dh_builddeb
+
+binary: binary-indep binary-arch
+.PHONY: build clean binary-indep binary-arch binary install configure
diff --git a/debian/watch b/debian/watch
new file mode 100644
index 00000000..82cf9099
--- /dev/null
+++ b/debian/watch
@@ -0,0 +1,2 @@
+version=3
+http://www.cse.unsw.edu.au/~neilb/source/mdadm/mdadm-([.[:digit:]]+).tgz