Bug#537717: mdadm assembles RAID arrays in the initrd too early
tomdebian at gmx.de
Mon Jul 20 12:50:16 UTC 2009
Package: mdadm
Version: 2.6.9-3
Severity: normal
My RAID devices are started within the initrd. But when mdadm begins to assemble them, the SCSI controller has not yet finished detecting the local SCSI devices, so mdadm fails for md0-md2.
If I insert a short sleep right at the start of scripts/local-top/mdadm:
MDADM=/sbin/mdadm
[ -x "$MDADM" ] || exit 0

# 09.07.09 Fix tr
sleep 15    # inserted workaround: give the SCSI controller time to detect its disks

verbose()
{
	case "$quiet" in
		y*|Y*|1|t*|T*)
			return 1;;
		*)
			return 0;;
	esac
}
then everything works. Also, if I stop the initrd with the command-line option break=mount, I can see that the initrd stops, and after 1-2 seconds the kernel reports the detected SCSI devices. I can then continue without problems.
It seems to be a timing issue.
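A fixed sleep is of course only a workaround; a more robust variant would be to poll until the disks behind md0-md2 have actually appeared (or a timeout expires) before assembling. A rough sketch of what I mean, inserted at the same place in scripts/local-top/mdadm - the device names and the 30-second limit are specific to this box, and it assumes the initramfs shell provides grep and $((...)) arithmetic (busybox does):

# Wait up to 30 seconds for the SCSI members of md0-md2 to appear
tries=30
while [ "$tries" -gt 0 ]; do
	if grep -q 'sde1$' /proc/partitions && grep -q 'sdh1$' /proc/partitions; then
		break	# first and last SCSI disk partitions are visible, assembly can proceed
	fi
	sleep 1
	tries=$((tries - 1))
done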
Hardware:
4x SATA: sda, sdb, sdc, sdd
4x SCSI: sde, sdf, sdg, sdh
Tom
-- Package-specific info:
--- mount output
/dev/md1 on / type ext3 (rw,errors=remount-ro)
tmpfs on /lib/init/rw type tmpfs (rw,nosuid,mode=0755)
proc on /proc type proc (rw,noexec,nosuid,nodev)
sysfs on /sys type sysfs (rw,noexec,nosuid,nodev)
procbususb on /proc/bus/usb type usbfs (rw)
udev on /dev type tmpfs (rw,mode=0755)
tmpfs on /dev/shm type tmpfs (rw,nosuid,nodev)
devpts on /dev/pts type devpts (rw,noexec,nosuid,gid=5,mode=620)
fusectl on /sys/fs/fuse/connections type fusectl (rw)
/dev/md0 on /boot type ext3 (rw,errors=remount-ro)
/dev/md2 on /vmware type ext3 (rw)
/dev/md7 on /archiv type ext3 (rw,errors=remount-ro)
/dev/md3 on /archiv/intern type ext3 (rw,errors=remount-ro)
/dev/md4 on /datentempel type ext3 (rw,errors=remount-ro)
/dev/md6 on /archiv/sc-archiv type ext3 (rw,errors=remount-ro)
/dev/md8 on /data type ext3 (rw,errors=remount-ro)
/dev/md9 on /amanda type ext3 (rw,errors=remount-ro)
/dev/md11 on /scratch type ext3 (rw,errors=remount-ro)
/dev/md10 on /debiancopy type ext3 (rw,errors=remount-ro)
binfmt_misc on /proc/sys/fs/binfmt_misc type binfmt_misc (rw,noexec,nosuid,nodev)
--- mdadm.conf
# mdadm.conf
#
# Please refer to mdadm.conf(5) for information about this file.
#
# by default, scan all partitions (/proc/partitions) for MD superblocks.
# alternatively, specify devices to scan, using wildcards if desired.
DEVICE partitions
# auto-create devices with Debian standard permissions
CREATE owner=root group=disk mode=0660 auto=yes
# automatically tag new arrays as belonging to the local system
HOMEHOST <system>
# instruct the monitoring daemon where to send mail alerts
MAILADDR root
# This file was auto-generated on Fri, 23 Feb 2007 16:02:42 +0000
# by mkconf $Id: mkconf 261 2006-11-09 13:32:35Z madduck $
#
ARRAY /dev/md0 level=raid1 num-devices=4 UUID=19f38bc6:763d266b:132783e8:19cdff95
ARRAY /dev/md1 level=raid10 num-devices=4 UUID=e4ff4a38:03e7f205:132783e8:19cdff95
ARRAY /dev/md2 level=raid10 num-devices=4 UUID=c2378f71:7ce2aff8:132783e8:19cdff95
ARRAY /dev/md3 level=raid10 num-devices=4 UUID=55eb7745:93ec3d64:02ebe09c:3f9c8f88
ARRAY /dev/md4 level=raid10 num-devices=4 UUID=0fd85265:9b33d39f:02ebe09c:3f9c8f88
ARRAY /dev/md6 level=raid10 num-devices=4 UUID=3e65df1c:40619cf5:02ebe09c:3f9c8f88
ARRAY /dev/md7 level=raid10 num-devices=4 UUID=827f0a18:996a60df:02ebe09c:3f9c8f88
ARRAY /dev/md8 level=raid10 num-devices=4 UUID=3ba9e1bd:50659501:02ebe09c:3f9c8f88
ARRAY /dev/md9 level=raid10 num-devices=4 UUID=07bc43be:4b6d0642:02ebe09c:3f9c8f88
ARRAY /dev/md10 level=raid1 num-devices=2 UUID=9855bb81:60f39571:02ebe09c:3f9c8f88
ARRAY /dev/md11 level=raid0 num-devices=2 UUID=ccd46f0b:1cf6f6cd:02ebe09c:3f9c8f88
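(A quick way to cross-check the ARRAY lines above against the running arrays, assuming they are all assembled; the exact fields printed may differ slightly between mdadm versions:

	mdadm --detail --scan | sort
	grep '^ARRAY' /etc/mdadm/mdadm.conf | sort

and compare the two listings.)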
--- /proc/mdstat:
Personalities : [raid0] [raid1] [raid10]
md11 : active raid0 sda10[0] sdc10[1]
98253312 blocks 64k chunks
md3 : active raid10 sdd1[0] sdc1[3] sda1[2] sdb1[1]
781256832 blocks 64K chunks 2 near-copies [4/4] [UUUU]
md4 : active raid10 sdd5[0] sdc5[3] sda5[2] sdb5[1]
390620288 blocks 64K chunks 2 near-copies [4/4] [UUUU]
md6 : active raid10 sdd6[0] sdc6[3] sda6[2] sdb6[1]
390620288 blocks 64K chunks 2 near-copies [4/4] [UUUU]
md7 : active raid10 sdd7[0] sdc7[3] sda7[2] sdb7[1]
390620288 blocks 64K chunks 2 near-copies [4/4] [UUUU]
md8 : active raid10 sdd8[0] sdc8[3] sda8[2] sdb8[1]
390620288 blocks 64K chunks 2 near-copies [4/4] [UUUU]
md9 : active raid10 sdd9[0] sdc9[3] sda9[2] sdb9[1]
488279424 blocks 64K chunks 2 near-copies [4/4] [UUUU]
md10 : active raid1 sdd10[0] sdb10[1]
49126656 blocks [2/2] [UU]
md2 : active raid10 sde6[0] sdh6[3] sdg6[2] sdf6[1]
182241152 blocks 64K chunks 2 near-copies [4/4] [UUUU]
md1 : active raid10 sde2[0] sdh2[3] sdg2[2] sdf2[1]
97658880 blocks 64K chunks 2 near-copies [4/4] [UUUU]
md0 : active raid1 sde1[0] sdh1[3] sdg1[2] sdf1[1]
489856 blocks [4/4] [UUUU]
unused devices: <none>
--- /proc/partitions:
major minor #blocks name
8 0 1465138584 sda
8 1 390628507 sda1
8 2 1 sda2
8 5 195310237 sda5
8 6 195310237 sda6
8 7 195310237 sda7
8 8 195310237 sda8
8 9 244139804 sda9
8 10 49126769 sda10
8 16 1465138584 sdb
8 17 390628507 sdb1
8 18 1 sdb2
8 21 195310237 sdb5
8 22 195310237 sdb6
8 23 195310237 sdb7
8 24 195310237 sdb8
8 25 244139804 sdb9
8 26 49126769 sdb10
8 32 1465138584 sdc
8 33 390628507 sdc1
8 34 1 sdc2
8 37 195310237 sdc5
8 38 195310237 sdc6
8 39 195310237 sdc7
8 40 195310237 sdc8
8 41 244139804 sdc9
8 42 49126769 sdc10
8 48 1465138584 sdd
8 49 390628507 sdd1
8 50 1 sdd2
8 53 195310237 sdd5
8 54 195310237 sdd6
8 55 195310237 sdd7
8 56 195310237 sdd8
8 57 244139804 sdd9
8 58 49126769 sdd10
8 64 143374744 sde
8 65 489982 sde1
8 66 48829567 sde2
8 67 1 sde3
8 69 2931831 sde5
8 70 91120648 sde6
8 80 143374744 sdf
8 81 489982 sdf1
8 82 48829567 sdf2
8 83 1 sdf3
8 85 2931831 sdf5
8 86 91120648 sdf6
8 96 143374744 sdg
8 97 489982 sdg1
8 98 48829567 sdg2
8 99 1 sdg3
8 101 2931831 sdg5
8 102 91120648 sdg6
8 112 143374744 sdh
8 113 489982 sdh1
8 114 48829567 sdh2
8 115 1 sdh3
8 117 2931831 sdh5
8 118 91120648 sdh6
9 0 489856 md0
9 1 97658880 md1
9 2 182241152 md2
9 10 49126656 md10
9 9 488279424 md9
9 8 390620288 md8
9 7 390620288 md7
9 6 390620288 md6
9 4 390620288 md4
9 3 781256832 md3
9 11 98253312 md11
--- initrd.img-2.6.30-bpo.1-amd64:
40692 blocks
dda4eec5226bf313f645bf9efa4d0815 ./lib/modules/2.6.30-bpo.1-amd64/kernel/drivers/md/raid6_pq.ko
376a62bde3c920a33002c179db02e875 ./lib/modules/2.6.30-bpo.1-amd64/kernel/drivers/md/raid1.ko
adde92b422a150b28087bf77fba2624c ./lib/modules/2.6.30-bpo.1-amd64/kernel/drivers/md/raid0.ko
521df681b8556305b4dccde4dd53bdb7 ./lib/modules/2.6.30-bpo.1-amd64/kernel/drivers/md/multipath.ko
1b88f0e7471b710e5ad4689663516ccd ./lib/modules/2.6.30-bpo.1-amd64/kernel/drivers/md/linear.ko
a707748fc2616068b1b717517d79564c ./lib/modules/2.6.30-bpo.1-amd64/kernel/drivers/md/md-mod.ko
4de3c6ce7a10a551e2d2bf76034bfcff ./lib/modules/2.6.30-bpo.1-amd64/kernel/drivers/md/raid456.ko
6ec9a7477ab673b0f67fe54f81cc3027 ./lib/modules/2.6.30-bpo.1-amd64/kernel/drivers/md/raid10.ko
c2a148d22b1d66cfa72cf0046fb6c97c ./etc/mdadm/mdadm.conf
4c7b56c7d7766efa92a8b5e1d2060a09 ./sbin/mdadm
9ca512979e62c92ada0ab8244a38f2a5 ./scripts/local-top/mdadm
--- /proc/modules:
dm_snapshot 22524 0 - Live 0xffffffffa0388000
dm_mirror 14504 0 - Live 0xffffffffa037f000
dm_region_hash 12688 1 dm_mirror, Live 0xffffffffa0379000
dm_log 9924 2 dm_mirror,dm_region_hash, Live 0xffffffffa0371000
dm_mod 59096 3 dm_snapshot,dm_mirror,dm_log, Live 0xffffffffa0360000
raid10 20448 8 - Live 0xffffffffa01db000
raid1 21072 2 - Live 0xffffffffa01d0000
raid0 7124 1 - Live 0xffffffffa01c9000
md_mod 86372 14 raid10,raid1,raid0, Live 0xffffffffa01ae000
megaraid_mbox 28896 0 - Live 0xffffffffa0035000
scsi_mod 158336 10 sg,sr_mod,sd_mod,st,osst,aic79xx,aic7xxx,scsi_transport_spi,libata,megaraid_mbox, Live 0xffffffffa0008000
megaraid_mm 9784 1 megaraid_mbox, Live 0xffffffffa0000000
-- debconf information:
mdadm/autostart: true
* mdadm/mail_to: root
mdadm/initrdstart_msg_errmd:
* mdadm/initrdstart: all
mdadm/initrdstart_msg_errconf:
mdadm/initrdstart_notinconf: false
mdadm/initrdstart_msg_errexist:
mdadm/initrdstart_msg_intro:
* mdadm/autocheck: true
mdadm/initrdstart_msg_errblock:
* mdadm/start_daemon: true