author    Michael Tokarev <mjt@tls.msk.ru>    2014-12-20 08:48:44 +0000
committer Michael Tokarev <mjt@tls.msk.ru>    2014-12-20 08:48:44 +0000
commit    489bea7ee8e1dbecfa517b8415568044ab57c73a (patch)
tree      44d4878d4c7da3f4908ea9a765ef9b8f9c141756 /tests/10ddf-fail-spare
mdadm (3.3.2-5) unstable; urgency=medium

  * use-tempnode-not-devnode.patch: change the udev rules file to use
    $tempnode, which works on both wheezy and jessie udev, instead of
    $devnode, which only works on jessie. At this stage it is better to
    make the rules file compatible with the old version than to add a
    versioned dependency. Should be removed for jessie+1.
    (Closes: #770883)
  * fix Closes: list in previous entry (Closes: #771852)

# imported from the archive
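For illustration only, the change described above amounts to a one-line substitution in the udev rules file of roughly this form (a sketch; the exact rule line in mdadm's rules file may differ):

-IMPORT{program}="/sbin/mdadm --detail --export $devnode"
+IMPORT{program}="/sbin/mdadm --detail --export $tempnode"

$tempnode is the older udev spelling of the same substitution, accepted by both wheezy's and jessie's udev, whereas $devnode is only recognized by the newer udev shipped in jessie.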
Diffstat (limited to 'tests/10ddf-fail-spare')
-rw-r--r--   tests/10ddf-fail-spare   86
1 file changed, 86 insertions, 0 deletions
diff --git a/tests/10ddf-fail-spare b/tests/10ddf-fail-spare
new file mode 100644
index 00000000..ab737ca4
--- /dev/null
+++ b/tests/10ddf-fail-spare
@@ -0,0 +1,86 @@
+# Test suggested by Albert Pauw: create an array, fail one disk, have
+# mdmon activate the spare, then run Create again. The failed disk
+# must not be used for the new Create.
+. tests/env-ddf-template
+
+tmp=$(mktemp /tmp/mdtest-XXXXXX)
+rm -f $tmp
+
+mdadm --zero-superblock $dev8 $dev9 $dev10 $dev11 $dev12 $dev13
+mdadm -CR $container -e ddf -l container -n 5 $dev8 $dev9 $dev10 $dev11 $dev12
+
+mdadm -CR $member0 -l raid1 -n 2 $container
+#$dir/mdadm -CR $member0 -l raid1 -n 2 $container >/tmp/mdmon.txt 2>&1
+
+check wait
+
+set -- $(get_raiddisks $member0)
+fail0=$1
+mdadm --fail $member0 $fail0
+
+# To make sure the spare is activated, we may have to sleep;
+# 2s has always been enough in practice.
+sleep 2
+check wait
+
+# This test can succeed both ways: if the spare was activated
+# before the new array was created, we see only member 0.
+# Otherwise, we see both, and member 0 is degraded because the
+# new array grabbed the spare.
+# Which case occurs depends on the sleep time above.
+ret=0
+if mdadm -CR $member1 -l raid5 -n 3 $container; then
+ # Creation successful - must have been quicker than spare activation
+
+ check wait
+ set -- $(get_raiddisks $member1)
+ if [ $1 = $fail0 -o $2 = $fail0 -o $3 = $fail0 ]; then
+ echo ERROR: $member1 must not contain $fail0: $@
+ ret=1
+ fi
+ d1=$1
+ mdadm -E $d1 >$tmp
+ if ! grep -q 'state\[1\] : Optimal, Consistent' $tmp; then
+ echo ERROR: member 1 should be optimal in meta data
+ ret=1
+ fi
+ state0=Degraded
+else
+ # Creation unsuccessful - spare was used for member 0
+ state0=Optimal
+fi
+
+# Need to delay a little bit; sometimes the meta data aren't
+# up to date yet.
+sleep 0.5
+set -- $(get_raiddisks $member0)
+if [ $1 = $fail0 -o $2 = $fail0 ]; then
+ echo ERROR: $member0 must not contain $fail0: $@
+ ret=1
+fi
+d0=$1
+
+[ -f $tmp ] || mdadm -E $d0 >$tmp
+
+if ! grep -q 'state\[0\] : '$state0', Consistent' $tmp; then
+ echo ERROR: member 0 should be $state0 in meta data
+ ret=1
+fi
+if ! grep -q 'Offline, Failed' $tmp; then
+ echo ERROR: Failed disk expected in meta data
+ ret=1
+fi
+if [ $ret -eq 1 ]; then
+ cat /proc/mdstat
+ mdadm -E $d0
+ [ -n "$d1" ] && mdadm -E $d1 # $d1 is only set if the second Create succeeded
+ mdadm -E $fail0
+fi
+
+[ -f /tmp/mdmon.txt ] && {
+ cat /tmp/mdmon.txt
+ rm -f /tmp/mdmon.txt
+}
+
+rm -f $tmp
+[ $ret -eq 0 ]
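For reference, a test file like this is normally run through the harness at the top of the mdadm source tree; the invocation below is a sketch and assumes the harness accepts a test-name argument (details vary between mdadm versions):

# from the mdadm source tree, as root (the tests create real md devices)
make
./test 10ddf-fail-spare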