[kernel] r10226 - in dists/trunk/linux-2.6/debian/patches: bugfix/all series

Maximilian Attems maks at alioth.debian.org
Sun Jan 27 21:43:30 UTC 2008


Author: maks
Date: Sun Jan 27 21:43:29 2008
New Revision: 10226

Log:
update to patch-2.6.24-git3

no further conflicts


Added:
   dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.24-git3
      - copied, changed from r10225, /dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.24-git2
Removed:
   dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.24-git2
Modified:
   dists/trunk/linux-2.6/debian/patches/series/1~experimental.1

Copied: dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.24-git3 (from r10225, /dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.24-git2)
==============================================================================
--- /dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.24-git2	(original)
+++ dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.24-git3	Sun Jan 27 21:43:29 2008
@@ -11,6 +11,18 @@
  
  ###
  # The build process is as follows (targets):
+diff --git a/Documentation/DocBook/s390-drivers.tmpl b/Documentation/DocBook/s390-drivers.tmpl
+index 254e769..3d2f31b 100644
+--- a/Documentation/DocBook/s390-drivers.tmpl
++++ b/Documentation/DocBook/s390-drivers.tmpl
+@@ -116,6 +116,7 @@
+ !Iinclude/asm-s390/ccwdev.h
+ !Edrivers/s390/cio/device.c
+ !Edrivers/s390/cio/device_ops.c
++!Edrivers/s390/cio/airq.c
+     </sect1>
+     <sect1 id="cmf">
+      <title>The channel-measurement facility</title>
 diff --git a/Documentation/DocBook/scsi.tmpl b/Documentation/DocBook/scsi.tmpl
 new file mode 100644
 index 0000000..f299ab1
@@ -803,9 +815,18 @@
  
  o	"Free-Block Circulation": Shows the number of torture structures
 diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
-index a741f65..fb94f5a 100644
+index a741f65..ba0aacd 100644
 --- a/Documentation/cpu-hotplug.txt
 +++ b/Documentation/cpu-hotplug.txt
+@@ -50,7 +50,7 @@ additional_cpus=n (*)	Use this to limit hotpluggable cpus. This option sets
+   			cpu_possible_map = cpu_present_map + additional_cpus
+ 
+ (*) Option valid only for following architectures
+-- x86_64, ia64, s390
++- x86_64, ia64
+ 
+ ia64 and x86_64 use the number of disabled local apics in ACPI tables MADT
+ to determine the number of potentially hot-pluggable cpus. The implementation
 @@ -109,12 +109,13 @@ Never use anything other than cpumask_t to represent bitmap of CPUs.
  	for_each_cpu_mask(x,mask) - Iterate over some random collection of cpu mask.
  
@@ -982,6 +1003,51 @@
 +			large, the fs will silently revert it to the default.
 +			Localalloc is not enabled for local mounts.
 +localflocks		This disables cluster aware flock.
+diff --git a/Documentation/ide.txt b/Documentation/ide.txt
+index 1d50f23..94e2e3b 100644
+--- a/Documentation/ide.txt
++++ b/Documentation/ide.txt
+@@ -30,7 +30,7 @@
+ ***
+ ***  The CMD640 is also used on some Vesa Local Bus (VLB) cards, and is *NOT*
+ ***  automatically detected by Linux.  For safe, reliable operation with such
+-***  interfaces, one *MUST* use the "ide0=cmd640_vlb" kernel option.
++***  interfaces, one *MUST* use the "cmd640.probe_vlb" kernel option.
+ ***
+ ***  Use of the "serialize" option is no longer necessary.
+ 
+@@ -244,10 +244,6 @@ Summary of ide driver parameters for kernel command line
+ 
+  "hdx=nodma"		: disallow DMA
+ 
+- "hdx=swapdata"		: when the drive is a disk, byte swap all data
+-
+- "hdx=bswap"		: same as above..........
+-
+  "hdx=scsi"		: the return of the ide-scsi flag, this is useful for
+  			  allowing ide-floppy, ide-tape, and ide-cdrom|writers
+  			  to use ide-scsi emulation on a device specific option.
+@@ -292,9 +288,6 @@ The following are valid ONLY on ide0, which usually corresponds
+ to the first ATA interface found on the particular host, and the defaults for
+ the base,ctl ports must not be altered.
+ 
+- "ide0=cmd640_vlb"	: *REQUIRED* for VLB cards with the CMD640 chip
+-			  (not for PCI -- automatically detected)
+-
+  "ide=doubler"		: probe/support IDE doublers on Amiga
+ 
+ There may be more options than shown -- use the source, Luke!
+@@ -310,6 +303,10 @@ i.e. to enable probing for ALI M14xx chipsets (ali14xx host driver) use:
+ * "probe" module parameter when ali14xx driver is compiled as module
+   ("modprobe ali14xx probe")
+ 
++Also for legacy CMD640 host driver (cmd640) you need to use "probe_vlb"
++kernel paremeter to enable probing for VLB version of the chipset (PCI ones
++are detected automatically).
++
+ ================================================================================
+ 
+ IDE ATAPI streaming tape driver
 diff --git a/Documentation/ioctl-number.txt b/Documentation/ioctl-number.txt
 index 5c7fbf9..c18363b 100644
 --- a/Documentation/ioctl-number.txt
@@ -995,7 +1061,7 @@
  'p'	00-3F	linux/mc146818rtc.h	conflict!
  'p'	40-7F	linux/nvram.h
 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
-index c417877..65de5ba 100644
+index c417877..880f882 100644
 --- a/Documentation/kernel-parameters.txt
 +++ b/Documentation/kernel-parameters.txt
 @@ -34,6 +34,7 @@ parameter is applicable:
@@ -1006,7 +1072,30 @@
  	AX25	Appropriate AX.25 support is enabled.
  	BLACKFIN Blackfin architecture is enabled.
  	DRM	Direct Rendering Management support is enabled.
-@@ -1123,6 +1124,10 @@ and is between 256 and 4096 characters. It is defined in the file
+@@ -369,7 +370,8 @@ and is between 256 and 4096 characters. It is defined in the file
+ 			configured.  Potentially dangerous and should only be
+ 			used if you are entirely sure of the consequences.
+ 
+-	chandev=	[HW,NET] Generic channel device initialisation
++	ccw_timeout_log [S390]
++			See Documentation/s390/CommonIO for details.
+ 
+ 	checkreqprot	[SELINUX] Set initial checkreqprot flag value.
+ 			Format: { "0" | "1" }
+@@ -381,6 +383,12 @@ and is between 256 and 4096 characters. It is defined in the file
+ 			Value can be changed at runtime via
+ 				/selinux/checkreqprot.
+ 
++	cio_ignore=	[S390]
++			See Documentation/s390/CommonIO for details.
++
++	cio_msg=	[S390]
++			See Documentation/s390/CommonIO for details.
++
+ 	clock=		[BUGS=X86-32, HW] gettimeofday clocksource override.
+ 			[Deprecated]
+ 			Forces specified clocksource (if available) to be used
+@@ -1123,6 +1131,10 @@ and is between 256 and 4096 characters. It is defined in the file
  			of returning the full 64-bit number.
  			The default is to return 64-bit inode numbers.
  
@@ -1017,7 +1106,7 @@
  	nmi_watchdog=	[KNL,BUGS=X86-32] Debugging features for SMP kernels
  
  	no387		[BUGS=X86-32] Tells the kernel to use the 387 maths
-@@ -1593,7 +1598,13 @@ and is between 256 and 4096 characters. It is defined in the file
+@@ -1593,7 +1605,13 @@ and is between 256 and 4096 characters. It is defined in the file
  			Format: <vendor>:<model>:<flags>
  			(flags are integer value)
  
@@ -1706,6 +1795,22 @@
  device's directory:
  id - displays a list of support EISA IDs
  options - displays possible resource configurations
+diff --git a/Documentation/s390/CommonIO b/Documentation/s390/CommonIO
+index 86320aa..8fbc0a8 100644
+--- a/Documentation/s390/CommonIO
++++ b/Documentation/s390/CommonIO
+@@ -4,6 +4,11 @@ S/390 common I/O-Layer - command line parameters, procfs and debugfs entries
+ Command line parameters
+ -----------------------
+ 
++* ccw_timeout_log
++
++  Enable logging of debug information in case of ccw device timeouts.
++
++
+ * cio_msg = yes | no
+   
+   Determines whether information on found devices and sensed device 
 diff --git a/Documentation/s390/cds.txt b/Documentation/s390/cds.txt
 index 3081927..c4b7b2b 100644
 --- a/Documentation/s390/cds.txt
@@ -14870,8 +14975,88 @@
  };
  
  static struct sys_device device_openpic2 = {
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index 1330061..6ef54d2 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -276,9 +276,6 @@ source "kernel/Kconfig.preempt"
+ 
+ source "mm/Kconfig"
+ 
+-config HOLES_IN_ZONE
+-	def_bool y
+-
+ comment "I/O subsystem configuration"
+ 
+ config MACHCHK_WARNING
+diff --git a/arch/s390/crypto/Kconfig b/arch/s390/crypto/Kconfig
+deleted file mode 100644
+index d1defbb..0000000
+--- a/arch/s390/crypto/Kconfig
++++ /dev/null
+@@ -1,60 +0,0 @@
+-config CRYPTO_SHA1_S390
+-	tristate "SHA1 digest algorithm"
+-	depends on S390
+-	select CRYPTO_ALGAPI
+-	help
+-	  This is the s390 hardware accelerated implementation of the
+-	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
+-
+-config CRYPTO_SHA256_S390
+-	tristate "SHA256 digest algorithm"
+-	depends on S390
+-	select CRYPTO_ALGAPI
+-	help
+-	  This is the s390 hardware accelerated implementation of the
+-	  SHA256 secure hash standard (DFIPS 180-2).
+-
+-	  This version of SHA implements a 256 bit hash with 128 bits of
+-	  security against collision attacks.
+-
+-config CRYPTO_DES_S390
+-	tristate "DES and Triple DES cipher algorithms"
+-	depends on S390
+-	select CRYPTO_ALGAPI
+-	select CRYPTO_BLKCIPHER
+-	help
+-	  This us the s390 hardware accelerated implementation of the
+-	  DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
+-
+-config CRYPTO_AES_S390
+-	tristate "AES cipher algorithms"
+-	depends on S390
+-	select CRYPTO_ALGAPI
+-	select CRYPTO_BLKCIPHER
+-	help
+-	  This is the s390 hardware accelerated implementation of the
+-	  AES cipher algorithms (FIPS-197). AES uses the Rijndael
+-	  algorithm.
+-
+-	  Rijndael appears to be consistently a very good performer in
+-	  both hardware and software across a wide range of computing
+-	  environments regardless of its use in feedback or non-feedback
+-	  modes. Its key setup time is excellent, and its key agility is
+-	  good. Rijndael's very low memory requirements make it very well
+-	  suited for restricted-space environments, in which it also
+-	  demonstrates excellent performance. Rijndael's operations are
+-	  among the easiest to defend against power and timing attacks.
+-
+-	  On s390 the System z9-109 currently only supports the key size
+-	  of 128 bit.
+-
+-config S390_PRNG
+-	tristate "Pseudo random number generator device driver"
+-	depends on S390
+-	default "m"
+-	help
+-	  Select this option if you want to use the s390 pseudo random number
+-	  generator. The PRNG is part of the cryptographic processor functions
+-	  and uses triple-DES to generate secure random numbers like the
+-	  ANSI X9.17 standard. The PRNG is usable via the char device
+-	  /dev/prandom.
 diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
-index 5126696..46c9705 100644
+index 5126696..a3f67f8 100644
 --- a/arch/s390/crypto/aes_s390.c
 +++ b/arch/s390/crypto/aes_s390.c
 @@ -6,6 +6,7 @@
@@ -15239,7 +15424,8 @@
 -		cbc_aes_alg.cra_u.blkcipher.max_keysize = AES_MIN_KEY_SIZE;
 +	if (keylen_flag == AES_KEYLEN_128)
  		printk(KERN_INFO
- 		       "aes_s390: hardware acceleration only available for"
+-		       "aes_s390: hardware acceleration only available for"
++		       "aes_s390: hardware acceleration only available for "
  		       "128 bit keys\n");
 -	}
  
@@ -15250,6 +15436,28 @@
  MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
  MODULE_LICENSE("GPL");
 -
+diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
+index 8eb3a1a..0cfefdd 100644
+--- a/arch/s390/crypto/prng.c
++++ b/arch/s390/crypto/prng.c
+@@ -90,7 +90,7 @@ static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes,
+ 	int ret = 0;
+ 	int tmp;
+ 
+-	/* nbytes can be arbitrary long, we spilt it into chunks */
++	/* nbytes can be arbitrary length, we split it into chunks */
+ 	while (nbytes) {
+ 		/* same as in extract_entropy_user in random.c */
+ 		if (need_resched()) {
+@@ -146,7 +146,7 @@ static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes,
+ 	return ret;
+ }
+ 
+-static struct file_operations prng_fops = {
++static const struct file_operations prng_fops = {
+ 	.owner		= THIS_MODULE,
+ 	.open		= &prng_open,
+ 	.release	= NULL,
 diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
 index 5245717..4b010ff 100644
 --- a/arch/s390/hypfs/inode.c
@@ -15295,11 +15503,153 @@
  }
  
  module_init(hypfs_init)
+diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
+index 56cb710..b3b650a 100644
+--- a/arch/s390/kernel/Makefile
++++ b/arch/s390/kernel/Makefile
+@@ -31,7 +31,3 @@ S390_KEXEC_OBJS := machine_kexec.o crash.o
+ S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o)
+ obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS)
+ 
+-#
+-# This is just to get the dependencies...
+-#
+-binfmt_elf32.o:	$(TOPDIR)/fs/binfmt_elf.c
+diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
+index 1b3af7d..9f7b73b 100644
+--- a/arch/s390/kernel/early.c
++++ b/arch/s390/kernel/early.c
+@@ -276,7 +276,7 @@ void __init startup_init(void)
+ 	create_kernel_nss();
+ 	sort_main_extable();
+ 	setup_lowcore_early();
+-	sclp_readinfo_early();
++	sclp_read_info_early();
+ 	sclp_facilities_detect();
+ 	memsize = sclp_memory_detect();
+ #ifndef CONFIG_64BIT
+diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
+index a87b197..79dccd2 100644
+--- a/arch/s390/kernel/head64.S
++++ b/arch/s390/kernel/head64.S
+@@ -157,7 +157,7 @@ startup_continue:
+ 	.long	0xb2b10000		# store facility list
+ 	tm	0xc8,0x08		# check bit for clearing-by-ASCE
+ 	bno	0f-.LPG1(%r13)
+-	lhi	%r1,2094
++	lhi	%r1,2048
+ 	lhi	%r2,0
+ 	.long	0xb98e2001
+ 	oi	7(%r12),0x80		# set IDTE flag
 diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
-index ce0856d..b97694f 100644
+index ce0856d..db28cca 100644
 --- a/arch/s390/kernel/ipl.c
 +++ b/arch/s390/kernel/ipl.c
-@@ -162,22 +162,25 @@ EXPORT_SYMBOL_GPL(diag308);
+@@ -2,7 +2,7 @@
+  *  arch/s390/kernel/ipl.c
+  *    ipl/reipl/dump support for Linux on s390.
+  *
+- *    Copyright (C) IBM Corp. 2005,2006
++ *    Copyright IBM Corp. 2005,2007
+  *    Author(s): Michael Holzheu <holzheu at de.ibm.com>
+  *		 Heiko Carstens <heiko.carstens at de.ibm.com>
+  *		 Volker Sameske <sameske at de.ibm.com>
+@@ -31,6 +31,43 @@
+ #define IPL_FCP_DUMP_STR	"fcp_dump"
+ #define IPL_NSS_STR		"nss"
+ 
++#define DUMP_CCW_STR		"ccw"
++#define DUMP_FCP_STR		"fcp"
++#define DUMP_NONE_STR		"none"
++
++/*
++ * Four shutdown trigger types are supported:
++ * - panic
++ * - halt
++ * - power off
++ * - reipl
++ */
++#define ON_PANIC_STR		"on_panic"
++#define ON_HALT_STR		"on_halt"
++#define ON_POFF_STR		"on_poff"
++#define ON_REIPL_STR		"on_reboot"
++
++struct shutdown_action;
++struct shutdown_trigger {
++	char *name;
++	struct shutdown_action *action;
++};
++
++/*
++ * Five shutdown action types are supported:
++ */
++#define SHUTDOWN_ACTION_IPL_STR		"ipl"
++#define SHUTDOWN_ACTION_REIPL_STR	"reipl"
++#define SHUTDOWN_ACTION_DUMP_STR	"dump"
++#define SHUTDOWN_ACTION_VMCMD_STR	"vmcmd"
++#define SHUTDOWN_ACTION_STOP_STR	"stop"
++
++struct shutdown_action {
++	char *name;
++	void (*fn) (struct shutdown_trigger *trigger);
++	int (*init) (void);
++};
++
+ static char *ipl_type_str(enum ipl_type type)
+ {
+ 	switch (type) {
+@@ -54,10 +91,6 @@ enum dump_type {
+ 	DUMP_TYPE_FCP	= 4,
+ };
+ 
+-#define DUMP_NONE_STR	 "none"
+-#define DUMP_CCW_STR	 "ccw"
+-#define DUMP_FCP_STR	 "fcp"
+-
+ static char *dump_type_str(enum dump_type type)
+ {
+ 	switch (type) {
+@@ -99,30 +132,6 @@ enum dump_method {
+ 	DUMP_METHOD_FCP_DIAG,
+ };
+ 
+-enum shutdown_action {
+-	SHUTDOWN_REIPL,
+-	SHUTDOWN_DUMP,
+-	SHUTDOWN_STOP,
+-};
+-
+-#define SHUTDOWN_REIPL_STR "reipl"
+-#define SHUTDOWN_DUMP_STR  "dump"
+-#define SHUTDOWN_STOP_STR  "stop"
+-
+-static char *shutdown_action_str(enum shutdown_action action)
+-{
+-	switch (action) {
+-	case SHUTDOWN_REIPL:
+-		return SHUTDOWN_REIPL_STR;
+-	case SHUTDOWN_DUMP:
+-		return SHUTDOWN_DUMP_STR;
+-	case SHUTDOWN_STOP:
+-		return SHUTDOWN_STOP_STR;
+-	default:
+-		return NULL;
+-	}
+-}
+-
+ static int diag308_set_works = 0;
+ 
+ static int reipl_capabilities = IPL_TYPE_UNKNOWN;
+@@ -140,8 +149,6 @@ static enum dump_method dump_method = DUMP_METHOD_NONE;
+ static struct ipl_parameter_block *dump_block_fcp;
+ static struct ipl_parameter_block *dump_block_ccw;
+ 
+-static enum shutdown_action on_panic_action = SHUTDOWN_STOP;
+-
+ static struct sclp_ipl_info sclp_ipl_info;
+ 
+ int diag308(unsigned long subcode, void *addr)
+@@ -162,22 +169,25 @@ EXPORT_SYMBOL_GPL(diag308);
  /* SYSFS */
  
  #define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value)		\
@@ -15329,7 +15679,7 @@
  		const char *buf, size_t len)				\
  {									\
  	unsigned long long value;					\
-@@ -186,25 +189,27 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kset *kset,	\
+@@ -186,25 +196,27 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kset *kset,	\
  	_value = value;							\
  	return len;							\
  }									\
@@ -15352,8 +15702,10 @@
 +		struct kobj_attribute *attr,				\
  		const char *buf, size_t len)				\
  {									\
- 	if (sscanf(buf, _fmt_in, _value) != 1)				\
- 		return -EINVAL;						\
+-	if (sscanf(buf, _fmt_in, _value) != 1)				\
+-		return -EINVAL;						\
++	strncpy(_value, buf, sizeof(_value) - 1);			\
++	strstrip(_value);						\
  	return len;							\
  }									\
 -static struct subsys_attribute sys_##_prefix##_##_name##_attr =		\
@@ -15361,7 +15713,37 @@
  	__ATTR(_name,(S_IRUGO | S_IWUSR),				\
  			sys_##_prefix##_##_name##_show,			\
  			sys_##_prefix##_##_name##_store);
-@@ -270,14 +275,16 @@ void __init setup_ipl_info(void)
+@@ -240,44 +252,19 @@ static __init enum ipl_type get_ipl_type(void)
+ 	return IPL_TYPE_FCP;
+ }
+ 
+-void __init setup_ipl_info(void)
+-{
+-	ipl_info.type = get_ipl_type();
+-	switch (ipl_info.type) {
+-	case IPL_TYPE_CCW:
+-		ipl_info.data.ccw.dev_id.devno = ipl_devno;
+-		ipl_info.data.ccw.dev_id.ssid = 0;
+-		break;
+-	case IPL_TYPE_FCP:
+-	case IPL_TYPE_FCP_DUMP:
+-		ipl_info.data.fcp.dev_id.devno =
+-			IPL_PARMBLOCK_START->ipl_info.fcp.devno;
+-		ipl_info.data.fcp.dev_id.ssid = 0;
+-		ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn;
+-		ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
+-		break;
+-	case IPL_TYPE_NSS:
+-		strncpy(ipl_info.data.nss.name, kernel_nss_name,
+-			sizeof(ipl_info.data.nss.name));
+-		break;
+-	case IPL_TYPE_UNKNOWN:
+-	default:
+-		/* We have no info to copy */
+-		break;
+-	}
+-}
+-
  struct ipl_info ipl_info;
  EXPORT_SYMBOL_GPL(ipl_info);
  
@@ -15381,7 +15763,7 @@
  {
  	struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
  
-@@ -292,7 +299,7 @@ static ssize_t sys_ipl_device_show(struct kset *kset, char *page)
+@@ -292,7 +279,7 @@ static ssize_t sys_ipl_device_show(struct kset *kset, char *page)
  	}
  }
  
@@ -15390,7 +15772,7 @@
  	__ATTR(device, S_IRUGO, sys_ipl_device_show, NULL);
  
  static ssize_t ipl_parameter_read(struct kobject *kobj, struct bin_attribute *attr,
-@@ -367,7 +374,8 @@ static struct attribute_group ipl_fcp_attr_group = {
+@@ -367,7 +354,8 @@ static struct attribute_group ipl_fcp_attr_group = {
  
  /* CCW ipl device attributes */
  
@@ -15400,7 +15782,7 @@
  {
  	char loadparm[LOADPARM_LEN + 1] = {};
  
-@@ -379,7 +387,7 @@ static ssize_t ipl_ccw_loadparm_show(struct kset *kset, char *page)
+@@ -379,7 +367,7 @@ static ssize_t ipl_ccw_loadparm_show(struct kset *kset, char *page)
  	return sprintf(page, "%s\n", loadparm);
  }
  
@@ -15409,16 +15791,86 @@
  	__ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL);
  
  static struct attribute *ipl_ccw_attrs[] = {
-@@ -418,7 +426,7 @@ static struct attribute_group ipl_unknown_attr_group = {
+@@ -418,10 +406,76 @@ static struct attribute_group ipl_unknown_attr_group = {
  	.attrs = ipl_unknown_attrs,
  };
  
 -static decl_subsys(ipl, NULL, NULL);
 +static struct kset *ipl_kset;
++
++static int __init ipl_register_fcp_files(void)
++{
++	int rc;
++
++	rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
++	if (rc)
++		goto out;
++	rc = sysfs_create_bin_file(&ipl_kset->kobj, &ipl_parameter_attr);
++	if (rc)
++		goto out_ipl_parm;
++	rc = sysfs_create_bin_file(&ipl_kset->kobj, &ipl_scp_data_attr);
++	if (!rc)
++		goto out;
++
++	sysfs_remove_bin_file(&ipl_kset->kobj, &ipl_parameter_attr);
++
++out_ipl_parm:
++	sysfs_remove_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
++out:
++	return rc;
++}
++
++static void ipl_run(struct shutdown_trigger *trigger)
++{
++	diag308(DIAG308_IPL, NULL);
++	if (MACHINE_IS_VM)
++		__cpcmd("IPL", NULL, 0, NULL);
++	else if (ipl_info.type == IPL_TYPE_CCW)
++		reipl_ccw_dev(&ipl_info.data.ccw.dev_id);
++}
++
++static int ipl_init(void)
++{
++	int rc;
++
++	ipl_kset = kset_create_and_add("ipl", NULL, firmware_kobj);
++	if (!ipl_kset) {
++		rc = -ENOMEM;
++		goto out;
++	}
++	switch (ipl_info.type) {
++	case IPL_TYPE_CCW:
++		rc = sysfs_create_group(&ipl_kset->kobj, &ipl_ccw_attr_group);
++		break;
++	case IPL_TYPE_FCP:
++	case IPL_TYPE_FCP_DUMP:
++		rc = ipl_register_fcp_files();
++		break;
++	case IPL_TYPE_NSS:
++		rc = sysfs_create_group(&ipl_kset->kobj, &ipl_nss_attr_group);
++		break;
++	default:
++		rc = sysfs_create_group(&ipl_kset->kobj,
++					&ipl_unknown_attr_group);
++		break;
++	}
++out:
++	if (rc)
++		panic("ipl_init failed: rc = %i\n", rc);
++
++	return 0;
++}
++
++static struct shutdown_action ipl_action = {SHUTDOWN_ACTION_IPL_STR, ipl_run,
++					    ipl_init};
  
  /*
-  * reipl section
-@@ -465,7 +473,8 @@ static void reipl_get_ascii_loadparm(char *loadparm)
+- * reipl section
++ * reipl shutdown action: Reboot Linux on shutdown.
+  */
+ 
+ /* FCP reipl device attributes */
+@@ -465,7 +519,8 @@ static void reipl_get_ascii_loadparm(char *loadparm)
  	strstrip(loadparm);
  }
  
@@ -15428,7 +15880,7 @@
  {
  	char buf[LOADPARM_LEN + 1];
  
-@@ -473,7 +482,8 @@ static ssize_t reipl_ccw_loadparm_show(struct kset *kset, char *page)
+@@ -473,7 +528,8 @@ static ssize_t reipl_ccw_loadparm_show(struct kset *kset, char *page)
  	return sprintf(page, "%s\n", buf);
  }
  
@@ -15438,7 +15890,7 @@
  					const char *buf, size_t len)
  {
  	int i, lp_len;
-@@ -500,7 +510,7 @@ static ssize_t reipl_ccw_loadparm_store(struct kset *kset,
+@@ -500,7 +556,7 @@ static ssize_t reipl_ccw_loadparm_store(struct kset *kset,
  	return len;
  }
  
@@ -15447,7 +15899,18 @@
  	__ATTR(loadparm, 0644, reipl_ccw_loadparm_show,
  	       reipl_ccw_loadparm_store);
  
-@@ -568,13 +578,15 @@ static int reipl_set_type(enum ipl_type type)
+@@ -539,7 +595,9 @@ static int reipl_set_type(enum ipl_type type)
+ 
+ 	switch(type) {
+ 	case IPL_TYPE_CCW:
+-		if (MACHINE_IS_VM)
++		if (diag308_set_works)
++			reipl_method = REIPL_METHOD_CCW_DIAG;
++		else if (MACHINE_IS_VM)
+ 			reipl_method = REIPL_METHOD_CCW_VM;
+ 		else
+ 			reipl_method = REIPL_METHOD_CCW_CIO;
+@@ -568,13 +626,15 @@ static int reipl_set_type(enum ipl_type type)
  	return 0;
  }
  
@@ -15466,149 +15929,260 @@
  {
  	int rc = -EINVAL;
  
-@@ -587,10 +599,10 @@ static ssize_t reipl_type_store(struct kset *kset, const char *buf,
+@@ -587,140 +647,12 @@ static ssize_t reipl_type_store(struct kset *kset, const char *buf,
  	return (rc != 0) ? rc : len;
  }
  
 -static struct subsys_attribute reipl_type_attr =
-+static struct kobj_attribute reipl_type_attr =
- 		__ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store);
- 
+-		__ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store);
+-
 -static decl_subsys(reipl, NULL, NULL);
-+static struct kset *reipl_kset;
- 
- /*
-  * dump section
-@@ -663,13 +675,15 @@ static int dump_set_type(enum dump_type type)
- 	return 0;
- }
- 
+-
+-/*
+- * dump section
+- */
+-
+-/* FCP dump device attributes */
+-
+-DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n",
+-		   dump_block_fcp->ipl_info.fcp.wwpn);
+-DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n",
+-		   dump_block_fcp->ipl_info.fcp.lun);
+-DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
+-		   dump_block_fcp->ipl_info.fcp.bootprog);
+-DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n",
+-		   dump_block_fcp->ipl_info.fcp.br_lba);
+-DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
+-		   dump_block_fcp->ipl_info.fcp.devno);
+-
+-static struct attribute *dump_fcp_attrs[] = {
+-	&sys_dump_fcp_device_attr.attr,
+-	&sys_dump_fcp_wwpn_attr.attr,
+-	&sys_dump_fcp_lun_attr.attr,
+-	&sys_dump_fcp_bootprog_attr.attr,
+-	&sys_dump_fcp_br_lba_attr.attr,
+-	NULL,
+-};
+-
+-static struct attribute_group dump_fcp_attr_group = {
+-	.name  = IPL_FCP_STR,
+-	.attrs = dump_fcp_attrs,
+-};
+-
+-/* CCW dump device attributes */
+-
+-DEFINE_IPL_ATTR_RW(dump_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
+-		   dump_block_ccw->ipl_info.ccw.devno);
+-
+-static struct attribute *dump_ccw_attrs[] = {
+-	&sys_dump_ccw_device_attr.attr,
+-	NULL,
+-};
+-
+-static struct attribute_group dump_ccw_attr_group = {
+-	.name  = IPL_CCW_STR,
+-	.attrs = dump_ccw_attrs,
+-};
+-
+-/* dump type */
+-
+-static int dump_set_type(enum dump_type type)
+-{
+-	if (!(dump_capabilities & type))
+-		return -EINVAL;
+-	switch(type) {
+-	case DUMP_TYPE_CCW:
+-		if (MACHINE_IS_VM)
+-			dump_method = DUMP_METHOD_CCW_VM;
+-		else if (diag308_set_works)
+-			dump_method = DUMP_METHOD_CCW_DIAG;
+-		else
+-			dump_method = DUMP_METHOD_CCW_CIO;
+-		break;
+-	case DUMP_TYPE_FCP:
+-		dump_method = DUMP_METHOD_FCP_DIAG;
+-		break;
+-	default:
+-		dump_method = DUMP_METHOD_NONE;
+-	}
+-	dump_type = type;
+-	return 0;
+-}
+-
 -static ssize_t dump_type_show(struct kset *kset, char *page)
-+static ssize_t dump_type_show(struct kobject *kobj,
-+			      struct kobj_attribute *attr, char *page)
- {
- 	return sprintf(page, "%s\n", dump_type_str(dump_type));
- }
- 
+-{
+-	return sprintf(page, "%s\n", dump_type_str(dump_type));
+-}
+-
 -static ssize_t dump_type_store(struct kset *kset, const char *buf,
 -			       size_t len)
-+static ssize_t dump_type_store(struct kobject *kobj,
-+			       struct kobj_attribute *attr,
-+			       const char *buf, size_t len)
- {
- 	int rc = -EINVAL;
- 
-@@ -682,26 +696,28 @@ static ssize_t dump_type_store(struct kset *kset, const char *buf,
- 	return (rc != 0) ? rc : len;
- }
- 
+-{
+-	int rc = -EINVAL;
+-
+-	if (strncmp(buf, DUMP_NONE_STR, strlen(DUMP_NONE_STR)) == 0)
+-		rc = dump_set_type(DUMP_TYPE_NONE);
+-	else if (strncmp(buf, DUMP_CCW_STR, strlen(DUMP_CCW_STR)) == 0)
+-		rc = dump_set_type(DUMP_TYPE_CCW);
+-	else if (strncmp(buf, DUMP_FCP_STR, strlen(DUMP_FCP_STR)) == 0)
+-		rc = dump_set_type(DUMP_TYPE_FCP);
+-	return (rc != 0) ? rc : len;
+-}
+-
 -static struct subsys_attribute dump_type_attr =
-+static struct kobj_attribute dump_type_attr =
- 		__ATTR(dump_type, 0644, dump_type_show, dump_type_store);
- 
+-		__ATTR(dump_type, 0644, dump_type_show, dump_type_store);
+-
 -static decl_subsys(dump, NULL, NULL);
-+static struct kset *dump_kset;
- 
- /*
-  * Shutdown actions section
-  */
- 
+-
+-/*
+- * Shutdown actions section
+- */
+-
 -static decl_subsys(shutdown_actions, NULL, NULL);
-+static struct kset *shutdown_actions_kset;
- 
- /* on panic */
- 
+-
+-/* on panic */
+-
 -static ssize_t on_panic_show(struct kset *kset, char *page)
-+static ssize_t on_panic_show(struct kobject *kobj,
-+			     struct kobj_attribute *attr, char *page)
- {
- 	return sprintf(page, "%s\n", shutdown_action_str(on_panic_action));
- }
- 
+-{
+-	return sprintf(page, "%s\n", shutdown_action_str(on_panic_action));
+-}
+-
 -static ssize_t on_panic_store(struct kset *kset, const char *buf,
 -			      size_t len)
-+static ssize_t on_panic_store(struct kobject *kobj,
-+			      struct kobj_attribute *attr,
-+			      const char *buf, size_t len)
- {
- 	if (strncmp(buf, SHUTDOWN_REIPL_STR, strlen(SHUTDOWN_REIPL_STR)) == 0)
- 		on_panic_action = SHUTDOWN_REIPL;
-@@ -717,7 +733,7 @@ static ssize_t on_panic_store(struct kset *kset, const char *buf,
- 	return len;
- }
+-{
+-	if (strncmp(buf, SHUTDOWN_REIPL_STR, strlen(SHUTDOWN_REIPL_STR)) == 0)
+-		on_panic_action = SHUTDOWN_REIPL;
+-	else if (strncmp(buf, SHUTDOWN_DUMP_STR,
+-			 strlen(SHUTDOWN_DUMP_STR)) == 0)
+-		on_panic_action = SHUTDOWN_DUMP;
+-	else if (strncmp(buf, SHUTDOWN_STOP_STR,
+-			 strlen(SHUTDOWN_STOP_STR)) == 0)
+-		on_panic_action = SHUTDOWN_STOP;
+-	else
+-		return -EINVAL;
+-
+-	return len;
+-}
++static struct kobj_attribute reipl_type_attr =
++	__ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store);
  
 -static struct subsys_attribute on_panic_attr =
-+static struct kobj_attribute on_panic_attr =
- 		__ATTR(on_panic, 0644, on_panic_show, on_panic_store);
+-		__ATTR(on_panic, 0644, on_panic_show, on_panic_store);
++static struct kset *reipl_kset;
  
- void do_reipl(void)
-@@ -814,23 +830,23 @@ static int __init ipl_register_fcp_files(void)
+-void do_reipl(void)
++void reipl_run(struct shutdown_trigger *trigger)
  {
- 	int rc;
- 
+ 	struct ccw_dev_id devid;
+ 	static char buf[100];
+@@ -729,8 +661,6 @@ void do_reipl(void)
+ 	switch (reipl_method) {
+ 	case REIPL_METHOD_CCW_CIO:
+ 		devid.devno = reipl_block_ccw->ipl_info.ccw.devno;
+-		if (ipl_info.type == IPL_TYPE_CCW && devid.devno == ipl_devno)
+-			diag308(DIAG308_IPL, NULL);
+ 		devid.ssid  = 0;
+ 		reipl_ccw_dev(&devid);
+ 		break;
+@@ -771,98 +701,6 @@ void do_reipl(void)
+ 	default:
+ 		break;
+ 	}
+-	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
+-}
+-
+-static void do_dump(void)
+-{
+-	struct ccw_dev_id devid;
+-	static char buf[100];
+-
+-	switch (dump_method) {
+-	case DUMP_METHOD_CCW_CIO:
+-		smp_send_stop();
+-		devid.devno = dump_block_ccw->ipl_info.ccw.devno;
+-		devid.ssid  = 0;
+-		reipl_ccw_dev(&devid);
+-		break;
+-	case DUMP_METHOD_CCW_VM:
+-		smp_send_stop();
+-		sprintf(buf, "STORE STATUS");
+-		__cpcmd(buf, NULL, 0, NULL);
+-		sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
+-		__cpcmd(buf, NULL, 0, NULL);
+-		break;
+-	case DUMP_METHOD_CCW_DIAG:
+-		diag308(DIAG308_SET, dump_block_ccw);
+-		diag308(DIAG308_DUMP, NULL);
+-		break;
+-	case DUMP_METHOD_FCP_DIAG:
+-		diag308(DIAG308_SET, dump_block_fcp);
+-		diag308(DIAG308_DUMP, NULL);
+-		break;
+-	case DUMP_METHOD_NONE:
+-	default:
+-		return;
+-	}
+-	printk(KERN_EMERG "Dump failed!\n");
+-}
+-
+-/* init functions */
+-
+-static int __init ipl_register_fcp_files(void)
+-{
+-	int rc;
+-
 -	rc = sysfs_create_group(&ipl_subsys.kobj,
-+	rc = sysfs_create_group(&ipl_kset->kobj,
- 				&ipl_fcp_attr_group);
- 	if (rc)
- 		goto out;
+-				&ipl_fcp_attr_group);
+-	if (rc)
+-		goto out;
 -	rc = sysfs_create_bin_file(&ipl_subsys.kobj,
-+	rc = sysfs_create_bin_file(&ipl_kset->kobj,
- 				   &ipl_parameter_attr);
- 	if (rc)
- 		goto out_ipl_parm;
+-				   &ipl_parameter_attr);
+-	if (rc)
+-		goto out_ipl_parm;
 -	rc = sysfs_create_bin_file(&ipl_subsys.kobj,
-+	rc = sysfs_create_bin_file(&ipl_kset->kobj,
- 				   &ipl_scp_data_attr);
- 	if (!rc)
- 		goto out;
- 
+-				   &ipl_scp_data_attr);
+-	if (!rc)
+-		goto out;
+-
 -	sysfs_remove_bin_file(&ipl_subsys.kobj, &ipl_parameter_attr);
-+	sysfs_remove_bin_file(&ipl_kset->kobj, &ipl_parameter_attr);
- 
- out_ipl_parm:
+-
+-out_ipl_parm:
 -	sysfs_remove_group(&ipl_subsys.kobj, &ipl_fcp_attr_group);
-+	sysfs_remove_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
- out:
- 	return rc;
- }
-@@ -839,12 +855,12 @@ static int __init ipl_init(void)
- {
- 	int rc;
- 
+-out:
+-	return rc;
+-}
+-
+-static int __init ipl_init(void)
+-{
+-	int rc;
+-
 -	rc = firmware_register(&ipl_subsys);
 -	if (rc)
 -		return rc;
-+	ipl_kset = kset_create_and_add("ipl", NULL, firmware_kobj);
-+	if (!ipl_kset)
-+		return -ENOMEM;
- 	switch (ipl_info.type) {
- 	case IPL_TYPE_CCW:
+-	switch (ipl_info.type) {
+-	case IPL_TYPE_CCW:
 -		rc = sysfs_create_group(&ipl_subsys.kobj,
-+		rc = sysfs_create_group(&ipl_kset->kobj,
- 					&ipl_ccw_attr_group);
- 		break;
- 	case IPL_TYPE_FCP:
-@@ -852,16 +868,16 @@ static int __init ipl_init(void)
- 		rc = ipl_register_fcp_files();
- 		break;
- 	case IPL_TYPE_NSS:
+-					&ipl_ccw_attr_group);
+-		break;
+-	case IPL_TYPE_FCP:
+-	case IPL_TYPE_FCP_DUMP:
+-		rc = ipl_register_fcp_files();
+-		break;
+-	case IPL_TYPE_NSS:
 -		rc = sysfs_create_group(&ipl_subsys.kobj,
-+		rc = sysfs_create_group(&ipl_kset->kobj,
- 					&ipl_nss_attr_group);
- 		break;
- 	default:
+-					&ipl_nss_attr_group);
+-		break;
+-	default:
 -		rc = sysfs_create_group(&ipl_subsys.kobj,
-+		rc = sysfs_create_group(&ipl_kset->kobj,
- 					&ipl_unknown_attr_group);
- 		break;
- 	}
- 	if (rc)
+-					&ipl_unknown_attr_group);
+-		break;
+-	}
+-	if (rc)
 -		firmware_unregister(&ipl_subsys);
-+		kset_unregister(ipl_kset);
- 	return rc;
+-	return rc;
  }
  
-@@ -883,7 +899,7 @@ static int __init reipl_nss_init(void)
+ static void __init reipl_probe(void)
+@@ -883,7 +721,7 @@ static int __init reipl_nss_init(void)
  
  	if (!MACHINE_IS_VM)
  		return 0;
@@ -15617,7 +16191,7 @@
  	if (rc)
  		return rc;
  	strncpy(reipl_nss_name, kernel_nss_name, NSS_NAME_SIZE + 1);
-@@ -898,7 +914,7 @@ static int __init reipl_ccw_init(void)
+@@ -898,7 +736,7 @@ static int __init reipl_ccw_init(void)
  	reipl_block_ccw = (void *) get_zeroed_page(GFP_KERNEL);
  	if (!reipl_block_ccw)
  		return -ENOMEM;
@@ -15626,7 +16200,25 @@
  	if (rc) {
  		free_page((unsigned long)reipl_block_ccw);
  		return rc;
-@@ -936,7 +952,7 @@ static int __init reipl_fcp_init(void)
+@@ -907,6 +745,7 @@ static int __init reipl_ccw_init(void)
+ 	reipl_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION;
+ 	reipl_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN;
+ 	reipl_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW;
++	reipl_block_ccw->hdr.flags = DIAG308_FLAGS_LP_VALID;
+ 	/* check if read scp info worked and set loadparm */
+ 	if (sclp_ipl_info.is_valid)
+ 		memcpy(reipl_block_ccw->ipl_info.ccw.load_param,
+@@ -915,8 +754,7 @@ static int __init reipl_ccw_init(void)
+ 		/* read scp info failed: set empty loadparm (EBCDIC blanks) */
+ 		memset(reipl_block_ccw->ipl_info.ccw.load_param, 0x40,
+ 		       LOADPARM_LEN);
+-	/* FIXME: check for diag308_set_works when enabling diag ccw reipl */
+-	if (!MACHINE_IS_VM)
++	if (!MACHINE_IS_VM && !diag308_set_works)
+ 		sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO;
+ 	if (ipl_info.type == IPL_TYPE_CCW)
+ 		reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
+@@ -936,7 +774,7 @@ static int __init reipl_fcp_init(void)
  	reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
  	if (!reipl_block_fcp)
  		return -ENOMEM;
@@ -15635,7 +16227,12 @@
  	if (rc) {
  		free_page((unsigned long)reipl_block_fcp);
  		return rc;
-@@ -958,12 +974,12 @@ static int __init reipl_init(void)
+@@ -954,16 +792,16 @@ static int __init reipl_fcp_init(void)
+ 	return 0;
+ }
+ 
+-static int __init reipl_init(void)
++static int reipl_init(void)
  {
  	int rc;
  
@@ -15653,7 +16250,148 @@
  		return rc;
  	}
  	rc = reipl_ccw_init();
-@@ -988,7 +1004,7 @@ static int __init dump_ccw_init(void)
+@@ -981,6 +819,140 @@ static int __init reipl_init(void)
+ 	return 0;
+ }
+ 
++static struct shutdown_action reipl_action = {SHUTDOWN_ACTION_REIPL_STR,
++					      reipl_run, reipl_init};
++
++/*
++ * dump shutdown action: Dump Linux on shutdown.
++ */
++
++/* FCP dump device attributes */
++
++DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n",
++		   dump_block_fcp->ipl_info.fcp.wwpn);
++DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n",
++		   dump_block_fcp->ipl_info.fcp.lun);
++DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
++		   dump_block_fcp->ipl_info.fcp.bootprog);
++DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n",
++		   dump_block_fcp->ipl_info.fcp.br_lba);
++DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
++		   dump_block_fcp->ipl_info.fcp.devno);
++
++static struct attribute *dump_fcp_attrs[] = {
++	&sys_dump_fcp_device_attr.attr,
++	&sys_dump_fcp_wwpn_attr.attr,
++	&sys_dump_fcp_lun_attr.attr,
++	&sys_dump_fcp_bootprog_attr.attr,
++	&sys_dump_fcp_br_lba_attr.attr,
++	NULL,
++};
++
++static struct attribute_group dump_fcp_attr_group = {
++	.name  = IPL_FCP_STR,
++	.attrs = dump_fcp_attrs,
++};
++
++/* CCW dump device attributes */
++
++DEFINE_IPL_ATTR_RW(dump_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
++		   dump_block_ccw->ipl_info.ccw.devno);
++
++static struct attribute *dump_ccw_attrs[] = {
++	&sys_dump_ccw_device_attr.attr,
++	NULL,
++};
++
++static struct attribute_group dump_ccw_attr_group = {
++	.name  = IPL_CCW_STR,
++	.attrs = dump_ccw_attrs,
++};
++
++/* dump type */
++
++static int dump_set_type(enum dump_type type)
++{
++	if (!(dump_capabilities & type))
++		return -EINVAL;
++	switch (type) {
++	case DUMP_TYPE_CCW:
++		if (diag308_set_works)
++			dump_method = DUMP_METHOD_CCW_DIAG;
++		else if (MACHINE_IS_VM)
++			dump_method = DUMP_METHOD_CCW_VM;
++		else
++			dump_method = DUMP_METHOD_CCW_CIO;
++		break;
++	case DUMP_TYPE_FCP:
++		dump_method = DUMP_METHOD_FCP_DIAG;
++		break;
++	default:
++		dump_method = DUMP_METHOD_NONE;
++	}
++	dump_type = type;
++	return 0;
++}
++
++static ssize_t dump_type_show(struct kobject *kobj,
++			      struct kobj_attribute *attr, char *page)
++{
++	return sprintf(page, "%s\n", dump_type_str(dump_type));
++}
++
++static ssize_t dump_type_store(struct kobject *kobj,
++			       struct kobj_attribute *attr,
++			       const char *buf, size_t len)
++{
++	int rc = -EINVAL;
++
++	if (strncmp(buf, DUMP_NONE_STR, strlen(DUMP_NONE_STR)) == 0)
++		rc = dump_set_type(DUMP_TYPE_NONE);
++	else if (strncmp(buf, DUMP_CCW_STR, strlen(DUMP_CCW_STR)) == 0)
++		rc = dump_set_type(DUMP_TYPE_CCW);
++	else if (strncmp(buf, DUMP_FCP_STR, strlen(DUMP_FCP_STR)) == 0)
++		rc = dump_set_type(DUMP_TYPE_FCP);
++	return (rc != 0) ? rc : len;
++}
++
++static struct kobj_attribute dump_type_attr =
++	__ATTR(dump_type, 0644, dump_type_show, dump_type_store);
++
++static struct kset *dump_kset;
++
++static void dump_run(struct shutdown_trigger *trigger)
++{
++	struct ccw_dev_id devid;
++	static char buf[100];
++
++	switch (dump_method) {
++	case DUMP_METHOD_CCW_CIO:
++		smp_send_stop();
++		devid.devno = dump_block_ccw->ipl_info.ccw.devno;
++		devid.ssid  = 0;
++		reipl_ccw_dev(&devid);
++		break;
++	case DUMP_METHOD_CCW_VM:
++		smp_send_stop();
++		sprintf(buf, "STORE STATUS");
++		__cpcmd(buf, NULL, 0, NULL);
++		sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
++		__cpcmd(buf, NULL, 0, NULL);
++		break;
++	case DUMP_METHOD_CCW_DIAG:
++		diag308(DIAG308_SET, dump_block_ccw);
++		diag308(DIAG308_DUMP, NULL);
++		break;
++	case DUMP_METHOD_FCP_DIAG:
++		diag308(DIAG308_SET, dump_block_fcp);
++		diag308(DIAG308_DUMP, NULL);
++		break;
++	case DUMP_METHOD_NONE:
++	default:
++		return;
++	}
++	printk(KERN_EMERG "Dump failed!\n");
++}
++
+ static int __init dump_ccw_init(void)
+ {
+ 	int rc;
+@@ -988,7 +960,7 @@ static int __init dump_ccw_init(void)
  	dump_block_ccw = (void *) get_zeroed_page(GFP_KERNEL);
  	if (!dump_block_ccw)
  		return -ENOMEM;
@@ -15662,7 +16400,7 @@
  	if (rc) {
  		free_page((unsigned long)dump_block_ccw);
  		return rc;
-@@ -1012,7 +1028,7 @@ static int __init dump_fcp_init(void)
+@@ -1012,7 +984,7 @@ static int __init dump_fcp_init(void)
  	dump_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
  	if (!dump_block_fcp)
  		return -ENOMEM;
@@ -15671,7 +16409,29 @@
  	if (rc) {
  		free_page((unsigned long)dump_block_fcp);
  		return rc;
-@@ -1047,12 +1063,12 @@ static int __init dump_init(void)
+@@ -1026,33 +998,16 @@ static int __init dump_fcp_init(void)
+ 	return 0;
+ }
+ 
+-#define SHUTDOWN_ON_PANIC_PRIO 0
+-
+-static int shutdown_on_panic_notify(struct notifier_block *self,
+-				    unsigned long event, void *data)
+-{
+-	if (on_panic_action == SHUTDOWN_DUMP)
+-		do_dump();
+-	else if (on_panic_action == SHUTDOWN_REIPL)
+-		do_reipl();
+-	return NOTIFY_OK;
+-}
+-
+-static struct notifier_block shutdown_on_panic_nb = {
+-	.notifier_call = shutdown_on_panic_notify,
+-	.priority = SHUTDOWN_ON_PANIC_PRIO
+-};
+-
+-static int __init dump_init(void)
++static int dump_init(void)
  {
  	int rc;
  
@@ -15682,306 +16442,2070 @@
 +	dump_kset = kset_create_and_add("dump", NULL, firmware_kobj);
 +	if (!dump_kset)
 +		return -ENOMEM;
-+	rc = sysfs_create_file(&dump_kset->kobj, &dump_type_attr);
++	rc = sysfs_create_file(&dump_kset->kobj, &dump_type_attr.attr);
  	if (rc) {
 -		firmware_unregister(&dump_subsys);
 +		kset_unregister(dump_kset);
  		return rc;
  	}
  	rc = dump_ccw_init();
-@@ -1069,12 +1085,13 @@ static int __init shutdown_actions_init(void)
+@@ -1065,46 +1020,381 @@ static int __init dump_init(void)
+ 	return 0;
+ }
+ 
+-static int __init shutdown_actions_init(void)
++static struct shutdown_action dump_action = {SHUTDOWN_ACTION_DUMP_STR,
++					     dump_run, dump_init};
++
++/*
++ * vmcmd shutdown action: Trigger vm command on shutdown.
++ */
++
++static char vmcmd_on_reboot[128];
++static char vmcmd_on_panic[128];
++static char vmcmd_on_halt[128];
++static char vmcmd_on_poff[128];
++
++DEFINE_IPL_ATTR_STR_RW(vmcmd, on_reboot, "%s\n", "%s\n", vmcmd_on_reboot);
++DEFINE_IPL_ATTR_STR_RW(vmcmd, on_panic, "%s\n", "%s\n", vmcmd_on_panic);
++DEFINE_IPL_ATTR_STR_RW(vmcmd, on_halt, "%s\n", "%s\n", vmcmd_on_halt);
++DEFINE_IPL_ATTR_STR_RW(vmcmd, on_poff, "%s\n", "%s\n", vmcmd_on_poff);
++
++static struct attribute *vmcmd_attrs[] = {
++	&sys_vmcmd_on_reboot_attr.attr,
++	&sys_vmcmd_on_panic_attr.attr,
++	&sys_vmcmd_on_halt_attr.attr,
++	&sys_vmcmd_on_poff_attr.attr,
++	NULL,
++};
++
++static struct attribute_group vmcmd_attr_group = {
++	.attrs = vmcmd_attrs,
++};
++
++static struct kset *vmcmd_kset;
++
++static void vmcmd_run(struct shutdown_trigger *trigger)
++{
++	char *cmd, *next_cmd;
++
++	if (strcmp(trigger->name, ON_REIPL_STR) == 0)
++		cmd = vmcmd_on_reboot;
++	else if (strcmp(trigger->name, ON_PANIC_STR) == 0)
++		cmd = vmcmd_on_panic;
++	else if (strcmp(trigger->name, ON_HALT_STR) == 0)
++		cmd = vmcmd_on_halt;
++	else if (strcmp(trigger->name, ON_POFF_STR) == 0)
++		cmd = vmcmd_on_poff;
++	else
++		return;
++
++	if (strlen(cmd) == 0)
++		return;
++	do {
++		next_cmd = strchr(cmd, '\n');
++		if (next_cmd) {
++			next_cmd[0] = 0;
++			next_cmd += 1;
++		}
++		__cpcmd(cmd, NULL, 0, NULL);
++		cmd = next_cmd;
++	} while (cmd != NULL);
++}
++
++static int vmcmd_init(void)
  {
- 	int rc;
+-	int rc;
++	if (!MACHINE_IS_VM)
++		return -ENOTSUPP;
++	vmcmd_kset = kset_create_and_add("vmcmd", NULL, firmware_kobj);
++	if (!vmcmd_kset)
++		return -ENOMEM;
++	return sysfs_create_group(&vmcmd_kset->kobj, &vmcmd_attr_group);
++}
  
 -	rc = firmware_register(&shutdown_actions_subsys);
 -	if (rc)
 -		return rc;
 -	rc = subsys_create_file(&shutdown_actions_subsys, &on_panic_attr);
-+	shutdown_actions_kset = kset_create_and_add("shutdown_actions", NULL,
-+						    firmware_kobj);
-+	if (!shutdown_actions_kset)
-+		return -ENOMEM;
-+	rc = sysfs_create_file(&shutdown_actions_kset->kobj, &on_panic_attr);
- 	if (rc) {
+-	if (rc) {
 -		firmware_unregister(&shutdown_actions_subsys);
-+		kset_unregister(shutdown_actions_kset);
- 		return rc;
+-		return rc;
++static struct shutdown_action vmcmd_action = {SHUTDOWN_ACTION_VMCMD_STR,
++					      vmcmd_run, vmcmd_init};
++
++/*
++ * stop shutdown action: Stop Linux on shutdown.
++ */
++
++static void stop_run(struct shutdown_trigger *trigger)
++{
++	if (strcmp(trigger->name, ON_PANIC_STR) == 0)
++		disabled_wait((unsigned long) __builtin_return_address(0));
++	else {
++		signal_processor(smp_processor_id(), sigp_stop);
++		for (;;);
  	}
- 	atomic_notifier_chain_register(&panic_notifier_list,
-diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
-index 22b800c..3bbac12 100644
---- a/arch/s390/kernel/time.c
-+++ b/arch/s390/kernel/time.c
-@@ -1145,7 +1145,7 @@ static void etr_work_fn(struct work_struct *work)
-  * Sysfs interface functions
-  */
- static struct sysdev_class etr_sysclass = {
--	set_kset_name("etr")
-+	.name	= "etr",
- };
+-	atomic_notifier_chain_register(&panic_notifier_list,
+-				       &shutdown_on_panic_nb);
+-	return 0;
+ }
  
- static struct sys_device etr_port0_dev = {
-diff --git a/arch/sh/drivers/dma/dma-sysfs.c b/arch/sh/drivers/dma/dma-sysfs.c
-index eebcd47..51b57c0 100644
---- a/arch/sh/drivers/dma/dma-sysfs.c
-+++ b/arch/sh/drivers/dma/dma-sysfs.c
-@@ -19,7 +19,7 @@
- #include <asm/dma.h>
+-static int __init s390_ipl_init(void)
++static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR,
++					     stop_run, NULL};
++
++/* action list */
++
++static struct shutdown_action *shutdown_actions_list[] = {
++	&ipl_action, &reipl_action, &dump_action, &vmcmd_action, &stop_action};
++#define SHUTDOWN_ACTIONS_COUNT (sizeof(shutdown_actions_list) / sizeof(void *))
++
++/*
++ * Trigger section
++ */
++
++static struct kset *shutdown_actions_kset;
++
++static int set_trigger(const char *buf, struct shutdown_trigger *trigger,
++		       size_t len)
+ {
+-	int rc;
++	int i;
++	for (i = 0; i < SHUTDOWN_ACTIONS_COUNT; i++) {
++		if (!shutdown_actions_list[i])
++			continue;
++		if (strncmp(buf, shutdown_actions_list[i]->name,
++			    strlen(shutdown_actions_list[i]->name)) == 0) {
++			trigger->action = shutdown_actions_list[i];
++			return len;
++		}
++	}
++	return -EINVAL;
++}
  
- static struct sysdev_class dma_sysclass = {
--	set_kset_name("dma"),
-+	.name = "dma",
- };
- EXPORT_SYMBOL(dma_sysclass);
+-	sclp_get_ipl_info(&sclp_ipl_info);
++/* on reipl */
++
++static struct shutdown_trigger on_reboot_trigger = {ON_REIPL_STR,
++						    &reipl_action};
++
++static ssize_t on_reboot_show(struct kobject *kobj,
++			      struct kobj_attribute *attr, char *page)
++{
++	return sprintf(page, "%s\n", on_reboot_trigger.action->name);
++}
++
++static ssize_t on_reboot_store(struct kobject *kobj,
++			       struct kobj_attribute *attr,
++			       const char *buf, size_t len)
++{
++	return set_trigger(buf, &on_reboot_trigger, len);
++}
++
++static struct kobj_attribute on_reboot_attr =
++	__ATTR(on_reboot, 0644, on_reboot_show, on_reboot_store);
++
++static void do_machine_restart(char *__unused)
++{
++	smp_send_stop();
++	on_reboot_trigger.action->fn(&on_reboot_trigger);
++	reipl_run(NULL);
++}
++void (*_machine_restart)(char *command) = do_machine_restart;
++
++/* on panic */
++
++static struct shutdown_trigger on_panic_trigger = {ON_PANIC_STR, &stop_action};
++
++static ssize_t on_panic_show(struct kobject *kobj,
++			     struct kobj_attribute *attr, char *page)
++{
++	return sprintf(page, "%s\n", on_panic_trigger.action->name);
++}
++
++static ssize_t on_panic_store(struct kobject *kobj,
++			      struct kobj_attribute *attr,
++			      const char *buf, size_t len)
++{
++	return set_trigger(buf, &on_panic_trigger, len);
++}
++
++static struct kobj_attribute on_panic_attr =
++	__ATTR(on_panic, 0644, on_panic_show, on_panic_store);
++
++static void do_panic(void)
++{
++	on_panic_trigger.action->fn(&on_panic_trigger);
++	stop_run(&on_panic_trigger);
++}
++
++/* on halt */
++
++static struct shutdown_trigger on_halt_trigger = {ON_HALT_STR, &stop_action};
++
++static ssize_t on_halt_show(struct kobject *kobj,
++			    struct kobj_attribute *attr, char *page)
++{
++	return sprintf(page, "%s\n", on_halt_trigger.action->name);
++}
++
++static ssize_t on_halt_store(struct kobject *kobj,
++			     struct kobj_attribute *attr,
++			     const char *buf, size_t len)
++{
++	return set_trigger(buf, &on_halt_trigger, len);
++}
++
++static struct kobj_attribute on_halt_attr =
++	__ATTR(on_halt, 0644, on_halt_show, on_halt_store);
++
++
++static void do_machine_halt(void)
++{
++	smp_send_stop();
++	on_halt_trigger.action->fn(&on_halt_trigger);
++	stop_run(&on_halt_trigger);
++}
++void (*_machine_halt)(void) = do_machine_halt;
++
++/* on power off */
++
++static struct shutdown_trigger on_poff_trigger = {ON_POFF_STR, &stop_action};
++
++static ssize_t on_poff_show(struct kobject *kobj,
++			    struct kobj_attribute *attr, char *page)
++{
++	return sprintf(page, "%s\n", on_poff_trigger.action->name);
++}
++
++static ssize_t on_poff_store(struct kobject *kobj,
++			     struct kobj_attribute *attr,
++			     const char *buf, size_t len)
++{
++	return set_trigger(buf, &on_poff_trigger, len);
++}
++
++static struct kobj_attribute on_poff_attr =
++	__ATTR(on_poff, 0644, on_poff_show, on_poff_store);
++
++
++static void do_machine_power_off(void)
++{
++	smp_send_stop();
++	on_poff_trigger.action->fn(&on_poff_trigger);
++	stop_run(&on_poff_trigger);
++}
++void (*_machine_power_off)(void) = do_machine_power_off;
++
++static void __init shutdown_triggers_init(void)
++{
++	shutdown_actions_kset = kset_create_and_add("shutdown_actions", NULL,
++						    firmware_kobj);
++	if (!shutdown_actions_kset)
++		goto fail;
++	if (sysfs_create_file(&shutdown_actions_kset->kobj,
++			      &on_reboot_attr.attr))
++		goto fail;
++	if (sysfs_create_file(&shutdown_actions_kset->kobj,
++			      &on_panic_attr.attr))
++		goto fail;
++	if (sysfs_create_file(&shutdown_actions_kset->kobj,
++			      &on_halt_attr.attr))
++		goto fail;
++	if (sysfs_create_file(&shutdown_actions_kset->kobj,
++			      &on_poff_attr.attr))
++		goto fail;
++
++	return;
++fail:
++	panic("shutdown_triggers_init failed\n");
++}
++
++static void __init shutdown_actions_init(void)
++{
++	int i;
++
++	for (i = 0; i < SHUTDOWN_ACTIONS_COUNT; i++) {
++		if (!shutdown_actions_list[i]->init)
++			continue;
++		if (shutdown_actions_list[i]->init())
++			shutdown_actions_list[i] = NULL;
++	}
++}
++
++static int __init s390_ipl_init(void)
++{
+ 	reipl_probe();
+-	rc = ipl_init();
+-	if (rc)
+-		return rc;
+-	rc = reipl_init();
+-	if (rc)
+-		return rc;
+-	rc = dump_init();
+-	if (rc)
+-		return rc;
+-	rc = shutdown_actions_init();
+-	if (rc)
+-		return rc;
++	sclp_get_ipl_info(&sclp_ipl_info);
++	shutdown_actions_init();
++	shutdown_triggers_init();
+ 	return 0;
+ }
  
-diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
-index b22a78c..3008c00 100644
---- a/arch/sh/kernel/cpu/sh4/sq.c
-+++ b/arch/sh/kernel/cpu/sh4/sq.c
-@@ -341,17 +341,18 @@ static int __devinit sq_sysdev_add(struct sys_device *sysdev)
+ __initcall(s390_ipl_init);
+ 
++static void __init strncpy_skip_quote(char *dst, char *src, int n)
++{
++	int sx, dx;
++
++	dx = 0;
++	for (sx = 0; src[sx] != 0; sx++) {
++		if (src[sx] == '"')
++			continue;
++		dst[dx++] = src[sx];
++		if (dx >= n)
++			break;
++	}
++}
++
++static int __init vmcmd_on_reboot_setup(char *str)
++{
++	if (!MACHINE_IS_VM)
++		return 1;
++	strncpy_skip_quote(vmcmd_on_reboot, str, 127);
++	vmcmd_on_reboot[127] = 0;
++	on_reboot_trigger.action = &vmcmd_action;
++	return 1;
++}
++__setup("vmreboot=", vmcmd_on_reboot_setup);
++
++static int __init vmcmd_on_panic_setup(char *str)
++{
++	if (!MACHINE_IS_VM)
++		return 1;
++	strncpy_skip_quote(vmcmd_on_panic, str, 127);
++	vmcmd_on_panic[127] = 0;
++	on_panic_trigger.action = &vmcmd_action;
++	return 1;
++}
++__setup("vmpanic=", vmcmd_on_panic_setup);
++
++static int __init vmcmd_on_halt_setup(char *str)
++{
++	if (!MACHINE_IS_VM)
++		return 1;
++	strncpy_skip_quote(vmcmd_on_halt, str, 127);
++	vmcmd_on_halt[127] = 0;
++	on_halt_trigger.action = &vmcmd_action;
++	return 1;
++}
++__setup("vmhalt=", vmcmd_on_halt_setup);
++
++static int __init vmcmd_on_poff_setup(char *str)
++{
++	if (!MACHINE_IS_VM)
++		return 1;
++	strncpy_skip_quote(vmcmd_on_poff, str, 127);
++	vmcmd_on_poff[127] = 0;
++	on_poff_trigger.action = &vmcmd_action;
++	return 1;
++}
++__setup("vmpoff=", vmcmd_on_poff_setup);
++
++static int on_panic_notify(struct notifier_block *self,
++			   unsigned long event, void *data)
++{
++	do_panic();
++	return NOTIFY_OK;
++}
++
++static struct notifier_block on_panic_nb = {
++	.notifier_call = on_panic_notify,
++	.priority = 0,
++};
++
++void __init setup_ipl(void)
++{
++	ipl_info.type = get_ipl_type();
++	switch (ipl_info.type) {
++	case IPL_TYPE_CCW:
++		ipl_info.data.ccw.dev_id.devno = ipl_devno;
++		ipl_info.data.ccw.dev_id.ssid = 0;
++		break;
++	case IPL_TYPE_FCP:
++	case IPL_TYPE_FCP_DUMP:
++		ipl_info.data.fcp.dev_id.devno =
++			IPL_PARMBLOCK_START->ipl_info.fcp.devno;
++		ipl_info.data.fcp.dev_id.ssid = 0;
++		ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn;
++		ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
++		break;
++	case IPL_TYPE_NSS:
++		strncpy(ipl_info.data.nss.name, kernel_nss_name,
++			sizeof(ipl_info.data.nss.name));
++		break;
++	case IPL_TYPE_UNKNOWN:
++	default:
++		/* We have no info to copy */
++		break;
++	}
++	atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
++}
++
+ void __init ipl_save_parameters(void)
  {
- 	unsigned int cpu = sysdev->id;
- 	struct kobject *kobj;
-+	int error;
+ 	struct cio_iplinfo iplinfo;
+@@ -1185,3 +1475,4 @@ void s390_reset_system(void)
  
- 	sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
- 	if (unlikely(!sq_kobject[cpu]))
- 		return -ENOMEM;
+ 	do_reset_calls();
+ }
++
+diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
+index 29f7884..0e7aca0 100644
+--- a/arch/s390/kernel/process.c
++++ b/arch/s390/kernel/process.c
+@@ -36,7 +36,7 @@
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/notifier.h>
+-
++#include <linux/utsname.h>
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+ #include <asm/system.h>
+@@ -182,13 +182,15 @@ void cpu_idle(void)
  
- 	kobj = sq_kobject[cpu];
--	kobj->parent = &sysdev->kobj;
--	kobject_set_name(kobj, "%s", "sq");
--	kobj->ktype = &ktype_percpu_entry;
+ void show_regs(struct pt_regs *regs)
+ {
+-	struct task_struct *tsk = current;
 -
--	return kobject_register(kobj);
-+	error = kobject_init_and_add(kobj, &ktype_percpu_entry, &sysdev->kobj,
-+				     "%s", "sq");
-+	if (!error)
-+		kobject_uevent(kobj, KOBJ_ADD);
-+	return error;
- }
+-        printk("CPU:    %d    %s\n", task_thread_info(tsk)->cpu, print_tainted());
+-        printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
+-	       current->comm, task_pid_nr(current), (void *) tsk,
+-	       (void *) tsk->thread.ksp);
+-
++	print_modules();
++	printk("CPU: %d %s %s %.*s\n",
++	       task_thread_info(current)->cpu, print_tainted(),
++	       init_utsname()->release,
++	       (int)strcspn(init_utsname()->version, " "),
++	       init_utsname()->version);
++	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
++	       current->comm, current->pid, current,
++	       (void *) current->thread.ksp);
+ 	show_registers(regs);
+ 	/* Show stack backtrace if pt_regs is from kernel mode */
+ 	if (!(regs->psw.mask & PSW_MASK_PSTATE))
+diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
+index 1d81bf9..6e036ba 100644
+--- a/arch/s390/kernel/ptrace.c
++++ b/arch/s390/kernel/ptrace.c
+@@ -86,13 +86,13 @@ FixPerRegisters(struct task_struct *task)
+ 		per_info->control_regs.bits.storage_alt_space_ctl = 0;
+ }
+ 
+-static void set_single_step(struct task_struct *task)
++void user_enable_single_step(struct task_struct *task)
+ {
+ 	task->thread.per_info.single_step = 1;
+ 	FixPerRegisters(task);
+ }
+ 
+-static void clear_single_step(struct task_struct *task)
++void user_disable_single_step(struct task_struct *task)
+ {
+ 	task->thread.per_info.single_step = 0;
+ 	FixPerRegisters(task);
+@@ -107,7 +107,7 @@ void
+ ptrace_disable(struct task_struct *child)
+ {
+ 	/* make sure the single step bit is not set. */
+-	clear_single_step(child);
++	user_disable_single_step(child);
+ }
+ 
+ #ifndef CONFIG_64BIT
+@@ -651,7 +651,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data)
+ 			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ 		child->exit_code = data;
+ 		/* make sure the single step bit is not set. */
+-		clear_single_step(child);
++		user_disable_single_step(child);
+ 		wake_up_process(child);
+ 		return 0;
  
- static int __devexit sq_sysdev_remove(struct sys_device *sysdev)
-@@ -359,7 +360,7 @@ static int __devexit sq_sysdev_remove(struct sys_device *sysdev)
- 	unsigned int cpu = sysdev->id;
- 	struct kobject *kobj = sq_kobject[cpu];
+@@ -665,7 +665,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data)
+ 			return 0;
+ 		child->exit_code = SIGKILL;
+ 		/* make sure the single step bit is not set. */
+-		clear_single_step(child);
++		user_disable_single_step(child);
+ 		wake_up_process(child);
+ 		return 0;
  
--	kobject_unregister(kobj);
-+	kobject_put(kobj);
- 	return 0;
+@@ -675,10 +675,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data)
+ 			return -EIO;
+ 		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ 		child->exit_code = data;
+-		if (data)
+-			set_tsk_thread_flag(child, TIF_SINGLE_STEP);
+-		else
+-			set_single_step(child);
++		user_enable_single_step(child);
+ 		/* give it a chance to run. */
+ 		wake_up_process(child);
+ 		return 0;
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index 577aa7d..766c783 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -126,75 +126,6 @@ void __cpuinit cpu_init(void)
  }
  
-diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
-index a3a67d1..2bc04bf 100644
---- a/arch/sh/kernel/time.c
-+++ b/arch/sh/kernel/time.c
-@@ -174,7 +174,7 @@ int timer_resume(struct sys_device *dev)
- #endif
- 
- static struct sysdev_class timer_sysclass = {
--	set_kset_name("timer"),
-+	.name	 = "timer",
- 	.suspend = timer_suspend,
- 	.resume	 = timer_resume,
- };
-diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
-index 46bb609..3874c2d 100644
---- a/arch/x86/crypto/Makefile
-+++ b/arch/x86/crypto/Makefile
-@@ -4,12 +4,16 @@
- 
- obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
- obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
-+obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o
+ /*
+- * VM halt and poweroff setup routines
+- */
+-char vmhalt_cmd[128] = "";
+-char vmpoff_cmd[128] = "";
+-static char vmpanic_cmd[128] = "";
+-
+-static void strncpy_skip_quote(char *dst, char *src, int n)
+-{
+-        int sx, dx;
+-
+-        dx = 0;
+-        for (sx = 0; src[sx] != 0; sx++) {
+-                if (src[sx] == '"') continue;
+-                dst[dx++] = src[sx];
+-                if (dx >= n) break;
+-        }
+-}
+-
+-static int __init vmhalt_setup(char *str)
+-{
+-        strncpy_skip_quote(vmhalt_cmd, str, 127);
+-        vmhalt_cmd[127] = 0;
+-        return 1;
+-}
+-
+-__setup("vmhalt=", vmhalt_setup);
+-
+-static int __init vmpoff_setup(char *str)
+-{
+-        strncpy_skip_quote(vmpoff_cmd, str, 127);
+-        vmpoff_cmd[127] = 0;
+-        return 1;
+-}
+-
+-__setup("vmpoff=", vmpoff_setup);
+-
+-static int vmpanic_notify(struct notifier_block *self, unsigned long event,
+-			  void *data)
+-{
+-	if (MACHINE_IS_VM && strlen(vmpanic_cmd) > 0)
+-		cpcmd(vmpanic_cmd, NULL, 0, NULL);
+-
+-	return NOTIFY_OK;
+-}
+-
+-#define PANIC_PRI_VMPANIC	0
+-
+-static struct notifier_block vmpanic_nb = {
+-	.notifier_call = vmpanic_notify,
+-	.priority = PANIC_PRI_VMPANIC
+-};
+-
+-static int __init vmpanic_setup(char *str)
+-{
+-	static int register_done __initdata = 0;
+-
+-	strncpy_skip_quote(vmpanic_cmd, str, 127);
+-	vmpanic_cmd[127] = 0;
+-	if (!register_done) {
+-		register_done = 1;
+-		atomic_notifier_chain_register(&panic_notifier_list,
+-					       &vmpanic_nb);
+-	}
+-	return 1;
+-}
+-
+-__setup("vmpanic=", vmpanic_setup);
+-
+-/*
+  * condev= and conmode= setup parameter.
+  */
  
- obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
- obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
-+obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o
+@@ -308,38 +239,6 @@ static void __init setup_zfcpdump(unsigned int console_devno)
+ static inline void setup_zfcpdump(unsigned int console_devno) {}
+ #endif /* CONFIG_ZFCPDUMP */
  
--aes-i586-y := aes-i586-asm_32.o aes_32.o
--twofish-i586-y := twofish-i586-asm_32.o twofish_32.o
-+aes-i586-y := aes-i586-asm_32.o aes_glue.o
-+twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o
-+salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o
+-#ifdef CONFIG_SMP
+-void (*_machine_restart)(char *command) = machine_restart_smp;
+-void (*_machine_halt)(void) = machine_halt_smp;
+-void (*_machine_power_off)(void) = machine_power_off_smp;
+-#else
+-/*
+- * Reboot, halt and power_off routines for non SMP.
+- */
+-static void do_machine_restart_nonsmp(char * __unused)
+-{
+-	do_reipl();
+-}
+-
+-static void do_machine_halt_nonsmp(void)
+-{
+-        if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
+-		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
+-        signal_processor(smp_processor_id(), sigp_stop_and_store_status);
+-}
+-
+-static void do_machine_power_off_nonsmp(void)
+-{
+-        if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
+-		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
+-        signal_processor(smp_processor_id(), sigp_stop_and_store_status);
+-}
+-
+-void (*_machine_restart)(char *command) = do_machine_restart_nonsmp;
+-void (*_machine_halt)(void) = do_machine_halt_nonsmp;
+-void (*_machine_power_off)(void) = do_machine_power_off_nonsmp;
+-#endif
+-
+  /*
+  * Reboot, halt and power_off stubs. They just call _machine_restart,
+  * _machine_halt or _machine_power_off. 
+@@ -559,7 +458,9 @@ setup_resources(void)
+ 	data_resource.start = (unsigned long) &_etext;
+ 	data_resource.end = (unsigned long) &_edata - 1;
+ 
+-	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
++	for (i = 0; i < MEMORY_CHUNKS; i++) {
++		if (!memory_chunk[i].size)
++			continue;
+ 		res = alloc_bootmem_low(sizeof(struct resource));
+ 		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
+ 		switch (memory_chunk[i].type) {
+@@ -617,7 +518,7 @@ EXPORT_SYMBOL_GPL(real_memory_size);
+ static void __init setup_memory_end(void)
+ {
+ 	unsigned long memory_size;
+-	unsigned long max_mem, max_phys;
++	unsigned long max_mem;
+ 	int i;
  
--aes-x86_64-y := aes-x86_64-asm_64.o aes_64.o
--twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_64.o
-+aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o
-+twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
-+salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o
-diff --git a/arch/x86/crypto/aes-i586-asm_32.S b/arch/x86/crypto/aes-i586-asm_32.S
-index f942f0c..1093bed 100644
---- a/arch/x86/crypto/aes-i586-asm_32.S
-+++ b/arch/x86/crypto/aes-i586-asm_32.S
-@@ -46,9 +46,9 @@
- #define in_blk 16
+ #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
+@@ -625,10 +526,31 @@ static void __init setup_memory_end(void)
+ 		memory_end = ZFCPDUMP_HSA_SIZE;
+ #endif
+ 	memory_size = 0;
+-	max_phys = VMALLOC_END_INIT - VMALLOC_MIN_SIZE;
+ 	memory_end &= PAGE_MASK;
+ 
+-	max_mem = memory_end ? min(max_phys, memory_end) : max_phys;
++	max_mem = memory_end ? min(VMALLOC_START, memory_end) : VMALLOC_START;
++	memory_end = min(max_mem, memory_end);
++
++	/*
++	 * Make sure all chunks are MAX_ORDER aligned so we don't need the
++	 * extra checks that HOLES_IN_ZONE would require.
++	 */
++	for (i = 0; i < MEMORY_CHUNKS; i++) {
++		unsigned long start, end;
++		struct mem_chunk *chunk;
++		unsigned long align;
++
++		chunk = &memory_chunk[i];
++		align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1);
++		start = (chunk->addr + align - 1) & ~(align - 1);
++		end = (chunk->addr + chunk->size) & ~(align - 1);
++		if (start >= end)
++			memset(chunk, 0, sizeof(*chunk));
++		else {
++			chunk->addr = start;
++			chunk->size = end - start;
++		}
++	}
  
- /* offsets in crypto_tfm structure */
--#define ekey (crypto_tfm_ctx_offset + 0)
--#define nrnd (crypto_tfm_ctx_offset + 256)
--#define dkey (crypto_tfm_ctx_offset + 260)
-+#define klen (crypto_tfm_ctx_offset + 0)
-+#define ekey (crypto_tfm_ctx_offset + 4)
-+#define dkey (crypto_tfm_ctx_offset + 244)
+ 	for (i = 0; i < MEMORY_CHUNKS; i++) {
+ 		struct mem_chunk *chunk = &memory_chunk[i];
+@@ -890,7 +812,7 @@ setup_arch(char **cmdline_p)
  
- // register mapping for encrypt and decrypt subroutines
+ 	parse_early_param();
  
-@@ -221,8 +221,8 @@
+-	setup_ipl_info();
++	setup_ipl();
+ 	setup_memory_end();
+ 	setup_addressing_mode();
+ 	setup_memory();
+@@ -899,7 +821,6 @@ setup_arch(char **cmdline_p)
  
- .global  aes_enc_blk
+         cpu_init();
+         __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr;
+-	smp_setup_cpu_possible_map();
  
--.extern  ft_tab
--.extern  fl_tab
-+.extern  crypto_ft_tab
-+.extern  crypto_fl_tab
+ 	/*
+ 	 * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
+@@ -920,7 +841,7 @@ setup_arch(char **cmdline_p)
  
- .align 4
+ void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo)
+ {
+-   printk("cpu %d "
++   printk(KERN_INFO "cpu %d "
+ #ifdef CONFIG_SMP
+            "phys_idx=%d "
+ #endif
+@@ -996,7 +917,7 @@ static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+ static void c_stop(struct seq_file *m, void *v)
+ {
+ }
+-struct seq_operations cpuinfo_op = {
++const struct seq_operations cpuinfo_op = {
+ 	.start	= c_start,
+ 	.next	= c_next,
+ 	.stop	= c_stop,
+diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
+index d264671..4449bf3 100644
+--- a/arch/s390/kernel/signal.c
++++ b/arch/s390/kernel/signal.c
+@@ -471,6 +471,7 @@ void do_signal(struct pt_regs *regs)
+ 
+ 	if (signr > 0) {
+ 		/* Whee!  Actually deliver the signal.  */
++		int ret;
+ #ifdef CONFIG_COMPAT
+ 		if (test_thread_flag(TIF_31BIT)) {
+ 			extern int handle_signal32(unsigned long sig,
+@@ -478,15 +479,12 @@ void do_signal(struct pt_regs *regs)
+ 						   siginfo_t *info,
+ 						   sigset_t *oldset,
+ 						   struct pt_regs *regs);
+-			if (handle_signal32(
+-				    signr, &ka, &info, oldset, regs) == 0) {
+-				if (test_thread_flag(TIF_RESTORE_SIGMASK))
+-					clear_thread_flag(TIF_RESTORE_SIGMASK);
+-			}
+-			return;
++			ret = handle_signal32(signr, &ka, &info, oldset, regs);
+ 	        }
++		else
+ #endif
+-		if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
++			ret = handle_signal(signr, &ka, &info, oldset, regs);
++		if (!ret) {
+ 			/*
+ 			 * A signal was successfully delivered; the saved
+ 			 * sigmask will have been stored in the signal frame,
+@@ -495,6 +493,14 @@ void do_signal(struct pt_regs *regs)
+ 			 */
+ 			if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ 				clear_thread_flag(TIF_RESTORE_SIGMASK);
++
++			/*
++			 * If we would have taken a single-step trap
++			 * for a normal instruction, act like we took
++			 * one for the handler setup.
++			 */
++			if (current->thread.per_info.single_step)
++				set_thread_flag(TIF_SINGLE_STEP);
+ 		}
+ 		return;
+ 	}
+diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
+index 264ea90..aa37fa1 100644
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -42,6 +42,7 @@
+ #include <asm/tlbflush.h>
+ #include <asm/timer.h>
+ #include <asm/lowcore.h>
++#include <asm/sclp.h>
+ #include <asm/cpu.h>
+ 
+ /*
+@@ -53,11 +54,27 @@ EXPORT_SYMBOL(lowcore_ptr);
+ cpumask_t cpu_online_map = CPU_MASK_NONE;
+ EXPORT_SYMBOL(cpu_online_map);
+ 
+-cpumask_t cpu_possible_map = CPU_MASK_NONE;
++cpumask_t cpu_possible_map = CPU_MASK_ALL;
+ EXPORT_SYMBOL(cpu_possible_map);
+ 
+ static struct task_struct *current_set[NR_CPUS];
+ 
++static u8 smp_cpu_type;
++static int smp_use_sigp_detection;
++
++enum s390_cpu_state {
++	CPU_STATE_STANDBY,
++	CPU_STATE_CONFIGURED,
++};
++
++#ifdef CONFIG_HOTPLUG_CPU
++static DEFINE_MUTEX(smp_cpu_state_mutex);
++#endif
++static int smp_cpu_state[NR_CPUS];
++
++static DEFINE_PER_CPU(struct cpu, cpu_devices);
++DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
++
+ static void smp_ext_bitcall(int, ec_bit_sig);
  
-@@ -236,7 +236,7 @@ aes_enc_blk:
- 1:	push    %ebx
- 	mov     in_blk+4(%esp),%r2
- 	push    %esi
--	mov     nrnd(%ebp),%r3   // number of rounds
-+	mov     klen(%ebp),%r3   // key size
- 	push    %edi
- #if ekey != 0
- 	lea     ekey(%ebp),%ebp  // key pointer
-@@ -255,26 +255,26 @@ aes_enc_blk:
+ /*
+@@ -193,6 +210,33 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+ }
+ EXPORT_SYMBOL(smp_call_function_single);
  
- 	sub     $8,%esp		// space for register saves on stack
- 	add     $16,%ebp	// increment to next round key
--	cmp     $12,%r3
-+	cmp     $24,%r3
- 	jb      4f		// 10 rounds for 128-bit key
- 	lea     32(%ebp),%ebp
- 	je      3f		// 12 rounds for 192-bit key
- 	lea     32(%ebp),%ebp
++/**
++ * smp_call_function_mask(): Run a function on a set of other CPUs.
++ * @mask: The set of cpus to run on.  Must not include the current cpu.
++ * @func: The function to run. This must be fast and non-blocking.
++ * @info: An arbitrary pointer to pass to the function.
++ * @wait: If true, wait (atomically) until function has completed on other CPUs.
++ *
++ * Returns 0 on success, else a negative status code.
++ *
++ * If @wait is true, then returns once @func has returned; otherwise
++ * it returns just before the target cpu calls @func.
++ *
++ * You must not call this function with disabled interrupts or from a
++ * hardware interrupt handler or from a bottom half handler.
++ */
++int
++smp_call_function_mask(cpumask_t mask,
++			void (*func)(void *), void *info,
++			int wait)
++{
++	preempt_disable();
++	__smp_call_function_map(func, info, 0, wait, mask);
++	preempt_enable();
++	return 0;
++}
++EXPORT_SYMBOL(smp_call_function_mask);
++
+ void smp_send_stop(void)
+ {
+ 	int cpu, rc;
+@@ -217,33 +261,6 @@ void smp_send_stop(void)
+ }
  
--2:	fwd_rnd1( -64(%ebp) ,ft_tab)	// 14 rounds for 256-bit key
--	fwd_rnd2( -48(%ebp) ,ft_tab)
--3:	fwd_rnd1( -32(%ebp) ,ft_tab)	// 12 rounds for 192-bit key
--	fwd_rnd2( -16(%ebp) ,ft_tab)
--4:	fwd_rnd1(    (%ebp) ,ft_tab)	// 10 rounds for 128-bit key
--	fwd_rnd2( +16(%ebp) ,ft_tab)
--	fwd_rnd1( +32(%ebp) ,ft_tab)
--	fwd_rnd2( +48(%ebp) ,ft_tab)
--	fwd_rnd1( +64(%ebp) ,ft_tab)
--	fwd_rnd2( +80(%ebp) ,ft_tab)
--	fwd_rnd1( +96(%ebp) ,ft_tab)
--	fwd_rnd2(+112(%ebp) ,ft_tab)
--	fwd_rnd1(+128(%ebp) ,ft_tab)
--	fwd_rnd2(+144(%ebp) ,fl_tab)	// last round uses a different table
-+2:	fwd_rnd1( -64(%ebp), crypto_ft_tab)	// 14 rounds for 256-bit key
-+	fwd_rnd2( -48(%ebp), crypto_ft_tab)
-+3:	fwd_rnd1( -32(%ebp), crypto_ft_tab)	// 12 rounds for 192-bit key
-+	fwd_rnd2( -16(%ebp), crypto_ft_tab)
-+4:	fwd_rnd1(    (%ebp), crypto_ft_tab)	// 10 rounds for 128-bit key
-+	fwd_rnd2( +16(%ebp), crypto_ft_tab)
-+	fwd_rnd1( +32(%ebp), crypto_ft_tab)
-+	fwd_rnd2( +48(%ebp), crypto_ft_tab)
-+	fwd_rnd1( +64(%ebp), crypto_ft_tab)
-+	fwd_rnd2( +80(%ebp), crypto_ft_tab)
-+	fwd_rnd1( +96(%ebp), crypto_ft_tab)
-+	fwd_rnd2(+112(%ebp), crypto_ft_tab)
-+	fwd_rnd1(+128(%ebp), crypto_ft_tab)
-+	fwd_rnd2(+144(%ebp), crypto_fl_tab)	// last round uses a different table
+ /*
+- * Reboot, halt and power_off routines for SMP.
+- */
+-void machine_restart_smp(char *__unused)
+-{
+-	smp_send_stop();
+-	do_reipl();
+-}
+-
+-void machine_halt_smp(void)
+-{
+-	smp_send_stop();
+-	if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
+-		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
+-	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
+-	for (;;);
+-}
+-
+-void machine_power_off_smp(void)
+-{
+-	smp_send_stop();
+-	if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
+-		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
+-	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
+-	for (;;);
+-}
+-
+-/*
+  * This is the main routine where commands issued by other
+  * cpus are handled.
+  */
+@@ -355,6 +372,13 @@ void smp_ctl_clear_bit(int cr, int bit)
+ }
+ EXPORT_SYMBOL(smp_ctl_clear_bit);
  
- // move final values to the output array.  CAUTION: the 
- // order of these assigns rely on the register mappings
-@@ -297,8 +297,8 @@ aes_enc_blk:
++/*
++ * In early ipl state a temp. logically cpu number is needed, so the sigp
++ * functions can be used to sense other cpus. Since NR_CPUS is >= 2 on
++ * CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1.
++ */
++#define CPU_INIT_NO	1
++
+ #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
  
- .global  aes_dec_blk
+ /*
+@@ -375,9 +399,10 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
+ 		       "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS);
+ 		return;
+ 	}
+-	zfcpdump_save_areas[cpu] = alloc_bootmem(sizeof(union save_area));
+-	__cpu_logical_map[1] = (__u16) phy_cpu;
+-	while (signal_processor(1, sigp_stop_and_store_status) == sigp_busy)
++	zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL);
++	__cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu;
++	while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
++	       sigp_busy)
+ 		cpu_relax();
+ 	memcpy(zfcpdump_save_areas[cpu],
+ 	       (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
+@@ -397,32 +422,155 @@ static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }
  
--.extern  it_tab
--.extern  il_tab
-+.extern  crypto_it_tab
-+.extern  crypto_il_tab
+ #endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */
  
- .align 4
+-/*
+- * Lets check how many CPUs we have.
+- */
+-static unsigned int __init smp_count_cpus(void)
++static int cpu_stopped(int cpu)
+ {
+-	unsigned int cpu, num_cpus;
+-	__u16 boot_cpu_addr;
++	__u32 status;
  
-@@ -312,14 +312,11 @@ aes_dec_blk:
- 1:	push    %ebx
- 	mov     in_blk+4(%esp),%r2
- 	push    %esi
--	mov     nrnd(%ebp),%r3   // number of rounds
-+	mov     klen(%ebp),%r3   // key size
- 	push    %edi
- #if dkey != 0
- 	lea     dkey(%ebp),%ebp  // key pointer
- #endif
--	mov     %r3,%r0
--	shl     $4,%r0
--	add     %r0,%ebp
- 	
- // input four columns and xor in first round key
+-	/*
+-	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
+-	 */
++	/* Check for stopped state */
++	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
++	    sigp_status_stored) {
++		if (status & 0x40)
++			return 1;
++	}
++	return 0;
++}
++
++static int cpu_known(int cpu_id)
++{
++	int cpu;
++
++	for_each_present_cpu(cpu) {
++		if (__cpu_logical_map[cpu] == cpu_id)
++			return 1;
++	}
++	return 0;
++}
++
++static int smp_rescan_cpus_sigp(cpumask_t avail)
++{
++	int cpu_id, logical_cpu;
++
++	logical_cpu = first_cpu(avail);
++	if (logical_cpu == NR_CPUS)
++		return 0;
++	for (cpu_id = 0; cpu_id <= 65535; cpu_id++) {
++		if (cpu_known(cpu_id))
++			continue;
++		__cpu_logical_map[logical_cpu] = cpu_id;
++		if (!cpu_stopped(logical_cpu))
++			continue;
++		cpu_set(logical_cpu, cpu_present_map);
++		smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
++		logical_cpu = next_cpu(logical_cpu, avail);
++		if (logical_cpu == NR_CPUS)
++			break;
++	}
++	return 0;
++}
++
++static int smp_rescan_cpus_sclp(cpumask_t avail)
++{
++	struct sclp_cpu_info *info;
++	int cpu_id, logical_cpu, cpu;
++	int rc;
++
++	logical_cpu = first_cpu(avail);
++	if (logical_cpu == NR_CPUS)
++		return 0;
++	info = kmalloc(sizeof(*info), GFP_KERNEL);
++	if (!info)
++		return -ENOMEM;
++	rc = sclp_get_cpu_info(info);
++	if (rc)
++		goto out;
++	for (cpu = 0; cpu < info->combined; cpu++) {
++		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
++			continue;
++		cpu_id = info->cpu[cpu].address;
++		if (cpu_known(cpu_id))
++			continue;
++		__cpu_logical_map[logical_cpu] = cpu_id;
++		cpu_set(logical_cpu, cpu_present_map);
++		if (cpu >= info->configured)
++			smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
++		else
++			smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
++		logical_cpu = next_cpu(logical_cpu, avail);
++		if (logical_cpu == NR_CPUS)
++			break;
++	}
++out:
++	kfree(info);
++	return rc;
++}
++
++static int smp_rescan_cpus(void)
++{
++	cpumask_t avail;
++
++	cpus_xor(avail, cpu_possible_map, cpu_present_map);
++	if (smp_use_sigp_detection)
++		return smp_rescan_cpus_sigp(avail);
++	else
++		return smp_rescan_cpus_sclp(avail);
++}
++
++static void __init smp_detect_cpus(void)
++{
++	unsigned int cpu, c_cpus, s_cpus;
++	struct sclp_cpu_info *info;
++	u16 boot_cpu_addr, cpu_addr;
++
++	c_cpus = 1;
++	s_cpus = 0;
+ 	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
+-	current_thread_info()->cpu = 0;
+-	num_cpus = 1;
+-	for (cpu = 0; cpu <= 65535; cpu++) {
+-		if ((__u16) cpu == boot_cpu_addr)
++	info = kmalloc(sizeof(*info), GFP_KERNEL);
++	if (!info)
++		panic("smp_detect_cpus failed to allocate memory\n");
++	/* Use sigp detection algorithm if sclp doesn't work. */
++	if (sclp_get_cpu_info(info)) {
++		smp_use_sigp_detection = 1;
++		for (cpu = 0; cpu <= 65535; cpu++) {
++			if (cpu == boot_cpu_addr)
++				continue;
++			__cpu_logical_map[CPU_INIT_NO] = cpu;
++			if (!cpu_stopped(CPU_INIT_NO))
++				continue;
++			smp_get_save_area(c_cpus, cpu);
++			c_cpus++;
++		}
++		goto out;
++	}
++
++	if (info->has_cpu_type) {
++		for (cpu = 0; cpu < info->combined; cpu++) {
++			if (info->cpu[cpu].address == boot_cpu_addr) {
++				smp_cpu_type = info->cpu[cpu].type;
++				break;
++			}
++		}
++	}
++
++	for (cpu = 0; cpu < info->combined; cpu++) {
++		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
++			continue;
++		cpu_addr = info->cpu[cpu].address;
++		if (cpu_addr == boot_cpu_addr)
+ 			continue;
+-		__cpu_logical_map[1] = (__u16) cpu;
+-		if (signal_processor(1, sigp_sense) == sigp_not_operational)
++		__cpu_logical_map[CPU_INIT_NO] = cpu_addr;
++		if (!cpu_stopped(CPU_INIT_NO)) {
++			s_cpus++;
+ 			continue;
+-		smp_get_save_area(num_cpus, cpu);
+-		num_cpus++;
++		}
++		smp_get_save_area(c_cpus, cpu_addr);
++		c_cpus++;
+ 	}
+-	printk("Detected %d CPU's\n", (int) num_cpus);
+-	printk("Boot cpu address %2X\n", boot_cpu_addr);
+-	return num_cpus;
++out:
++	kfree(info);
++	printk(KERN_INFO "CPUs: %d configured, %d standby\n", c_cpus, s_cpus);
++	get_online_cpus();
++	smp_rescan_cpus();
++	put_online_cpus();
+ }
  
-@@ -333,27 +330,27 @@ aes_dec_blk:
- 	xor     12(%ebp),%r5
+ /*
+@@ -453,8 +601,6 @@ int __cpuinit start_secondary(void *cpuvoid)
+ 	return 0;
+ }
  
- 	sub     $8,%esp		// space for register saves on stack
--	sub     $16,%ebp	// increment to next round key
--	cmp     $12,%r3
-+	add     $16,%ebp	// increment to next round key
-+	cmp     $24,%r3
- 	jb      4f		// 10 rounds for 128-bit key
--	lea     -32(%ebp),%ebp
-+	lea     32(%ebp),%ebp
- 	je      3f		// 12 rounds for 192-bit key
--	lea     -32(%ebp),%ebp
+-DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
 -
--2:	inv_rnd1( +64(%ebp), it_tab)	// 14 rounds for 256-bit key
--	inv_rnd2( +48(%ebp), it_tab)
--3:	inv_rnd1( +32(%ebp), it_tab)	// 12 rounds for 192-bit key
--	inv_rnd2( +16(%ebp), it_tab)
--4:	inv_rnd1(    (%ebp), it_tab)	// 10 rounds for 128-bit key
--	inv_rnd2( -16(%ebp), it_tab)
--	inv_rnd1( -32(%ebp), it_tab)
--	inv_rnd2( -48(%ebp), it_tab)
--	inv_rnd1( -64(%ebp), it_tab)
--	inv_rnd2( -80(%ebp), it_tab)
--	inv_rnd1( -96(%ebp), it_tab)
--	inv_rnd2(-112(%ebp), it_tab)
--	inv_rnd1(-128(%ebp), it_tab)
--	inv_rnd2(-144(%ebp), il_tab)	// last round uses a different table
-+	lea     32(%ebp),%ebp
-+
-+2:	inv_rnd1( -64(%ebp), crypto_it_tab)	// 14 rounds for 256-bit key
-+	inv_rnd2( -48(%ebp), crypto_it_tab)
-+3:	inv_rnd1( -32(%ebp), crypto_it_tab)	// 12 rounds for 192-bit key
-+	inv_rnd2( -16(%ebp), crypto_it_tab)
-+4:	inv_rnd1(    (%ebp), crypto_it_tab)	// 10 rounds for 128-bit key
-+	inv_rnd2( +16(%ebp), crypto_it_tab)
-+	inv_rnd1( +32(%ebp), crypto_it_tab)
-+	inv_rnd2( +48(%ebp), crypto_it_tab)
-+	inv_rnd1( +64(%ebp), crypto_it_tab)
-+	inv_rnd2( +80(%ebp), crypto_it_tab)
-+	inv_rnd1( +96(%ebp), crypto_it_tab)
-+	inv_rnd2(+112(%ebp), crypto_it_tab)
-+	inv_rnd1(+128(%ebp), crypto_it_tab)
-+	inv_rnd2(+144(%ebp), crypto_il_tab)	// last round uses a different table
+ static void __init smp_create_idle(unsigned int cpu)
+ {
+ 	struct task_struct *p;
+@@ -470,37 +616,82 @@ static void __init smp_create_idle(unsigned int cpu)
+ 	spin_lock_init(&(&per_cpu(s390_idle, cpu))->lock);
+ }
  
- // move final values to the output array.  CAUTION: the 
- // order of these assigns rely on the register mappings
-diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
-index 26b40de..a120f52 100644
---- a/arch/x86/crypto/aes-x86_64-asm_64.S
-+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
-@@ -8,10 +8,10 @@
-  * including this sentence is retained in full.
-  */
+-static int cpu_stopped(int cpu)
++static int __cpuinit smp_alloc_lowcore(int cpu)
+ {
+-	__u32 status;
++	unsigned long async_stack, panic_stack;
++	struct _lowcore *lowcore;
++	int lc_order;
++
++	lc_order = sizeof(long) == 8 ? 1 : 0;
++	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
++	if (!lowcore)
++		return -ENOMEM;
++	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
++	if (!async_stack)
++		goto out_async_stack;
++	panic_stack = __get_free_page(GFP_KERNEL);
++	if (!panic_stack)
++		goto out_panic_stack;
++
++	*lowcore = S390_lowcore;
++	lowcore->async_stack = async_stack + ASYNC_SIZE;
++	lowcore->panic_stack = panic_stack + PAGE_SIZE;
+ 
+-	/* Check for stopped state */
+-	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
+-	    sigp_status_stored) {
+-		if (status & 0x40)
+-			return 1;
++#ifndef CONFIG_64BIT
++	if (MACHINE_HAS_IEEE) {
++		unsigned long save_area;
++
++		save_area = get_zeroed_page(GFP_KERNEL);
++		if (!save_area)
++			goto out_save_area;
++		lowcore->extended_save_area_addr = (u32) save_area;
+ 	}
++#endif
++	lowcore_ptr[cpu] = lowcore;
+ 	return 0;
++
++#ifndef CONFIG_64BIT
++out_save_area:
++	free_page(panic_stack);
++#endif
++out_panic_stack:
++	free_pages(async_stack, ASYNC_ORDER);
++out_async_stack:
++	free_pages((unsigned long) lowcore, lc_order);
++	return -ENOMEM;
+ }
  
--.extern aes_ft_tab
--.extern aes_it_tab
--.extern aes_fl_tab
--.extern aes_il_tab
-+.extern crypto_ft_tab
-+.extern crypto_it_tab
-+.extern crypto_fl_tab
-+.extern crypto_il_tab
+-/* Upping and downing of CPUs */
++#ifdef CONFIG_HOTPLUG_CPU
++static void smp_free_lowcore(int cpu)
++{
++	struct _lowcore *lowcore;
++	int lc_order;
++
++	lc_order = sizeof(long) == 8 ? 1 : 0;
++	lowcore = lowcore_ptr[cpu];
++#ifndef CONFIG_64BIT
++	if (MACHINE_HAS_IEEE)
++		free_page((unsigned long) lowcore->extended_save_area_addr);
++#endif
++	free_page(lowcore->panic_stack - PAGE_SIZE);
++	free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
++	free_pages((unsigned long) lowcore, lc_order);
++	lowcore_ptr[cpu] = NULL;
++}
++#endif /* CONFIG_HOTPLUG_CPU */
+ 
+-int __cpu_up(unsigned int cpu)
++/* Upping and downing of CPUs */
++int __cpuinit __cpu_up(unsigned int cpu)
+ {
+ 	struct task_struct *idle;
+ 	struct _lowcore *cpu_lowcore;
+ 	struct stack_frame *sf;
+ 	sigp_ccode ccode;
+-	int curr_cpu;
+ 
+-	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
+-		__cpu_logical_map[cpu] = (__u16) curr_cpu;
+-		if (cpu_stopped(cpu))
+-			break;
+-	}
+-
+-	if (!cpu_stopped(cpu))
+-		return -ENODEV;
++	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
++		return -EIO;
++	if (smp_alloc_lowcore(cpu))
++		return -ENOMEM;
  
- .text
+ 	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
+ 				   cpu, sigp_set_prefix);
+@@ -515,6 +706,7 @@ int __cpu_up(unsigned int cpu)
+ 	cpu_lowcore = lowcore_ptr[cpu];
+ 	cpu_lowcore->kernel_stack = (unsigned long)
+ 		task_stack_page(idle) + THREAD_SIZE;
++	cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
+ 	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
+ 				     - sizeof(struct pt_regs)
+ 				     - sizeof(struct stack_frame));
+@@ -528,6 +720,8 @@ int __cpu_up(unsigned int cpu)
+ 	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
+ 	cpu_lowcore->current_task = (unsigned long) idle;
+ 	cpu_lowcore->cpu_data.cpu_nr = cpu;
++	cpu_lowcore->softirq_pending = 0;
++	cpu_lowcore->ext_call_fast = 0;
+ 	eieio();
+ 
+ 	while (signal_processor(cpu, sigp_restart) == sigp_busy)
+@@ -538,44 +732,20 @@ int __cpu_up(unsigned int cpu)
+ 	return 0;
+ }
  
-@@ -56,13 +56,13 @@
+-static unsigned int __initdata additional_cpus;
+-static unsigned int __initdata possible_cpus;
+-
+-void __init smp_setup_cpu_possible_map(void)
++static int __init setup_possible_cpus(char *s)
+ {
+-	unsigned int phy_cpus, pos_cpus, cpu;
+-
+-	phy_cpus = smp_count_cpus();
+-	pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);
+-
+-	if (possible_cpus)
+-		pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);
++	int pcpus, cpu;
+ 
+-	for (cpu = 0; cpu < pos_cpus; cpu++)
++	pcpus = simple_strtoul(s, NULL, 0);
++	cpu_possible_map = cpumask_of_cpu(0);
++	for (cpu = 1; cpu < pcpus && cpu < NR_CPUS; cpu++)
+ 		cpu_set(cpu, cpu_possible_map);
+-
+-	phy_cpus = min(phy_cpus, pos_cpus);
+-
+-	for (cpu = 0; cpu < phy_cpus; cpu++)
+-		cpu_set(cpu, cpu_present_map);
+-}
+-
+-#ifdef CONFIG_HOTPLUG_CPU
+-
+-static int __init setup_additional_cpus(char *s)
+-{
+-	additional_cpus = simple_strtoul(s, NULL, 0);
+-	return 0;
+-}
+-early_param("additional_cpus", setup_additional_cpus);
+-
+-static int __init setup_possible_cpus(char *s)
+-{
+-	possible_cpus = simple_strtoul(s, NULL, 0);
+ 	return 0;
+ }
+ early_param("possible_cpus", setup_possible_cpus);
+ 
++#ifdef CONFIG_HOTPLUG_CPU
++
+ int __cpu_disable(void)
+ {
+ 	struct ec_creg_mask_parms cr_parms;
+@@ -612,7 +782,8 @@ void __cpu_die(unsigned int cpu)
+ 	/* Wait until target cpu is down */
+ 	while (!smp_cpu_not_running(cpu))
+ 		cpu_relax();
+-	printk("Processor %d spun down\n", cpu);
++	smp_free_lowcore(cpu);
++	printk(KERN_INFO "Processor %d spun down\n", cpu);
+ }
+ 
+ void cpu_die(void)
+@@ -625,49 +796,19 @@ void cpu_die(void)
+ 
+ #endif /* CONFIG_HOTPLUG_CPU */
+ 
+-/*
+- *	Cycle through the processors and setup structures.
+- */
+-
+ void __init smp_prepare_cpus(unsigned int max_cpus)
+ {
+-	unsigned long stack;
+ 	unsigned int cpu;
+-	int i;
++
++	smp_detect_cpus();
+ 
+ 	/* request the 0x1201 emergency signal external interrupt */
+ 	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
+ 		panic("Couldn't request external interrupt 0x1201");
+ 	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
+-	/*
+-	 *  Initialize prefix pages and stacks for all possible cpus
+-	 */
+ 	print_cpu_info(&S390_lowcore.cpu_data);
++	smp_alloc_lowcore(smp_processor_id());
+ 
+-	for_each_possible_cpu(i) {
+-		lowcore_ptr[i] = (struct _lowcore *)
+-			__get_free_pages(GFP_KERNEL | GFP_DMA,
+-					 sizeof(void*) == 8 ? 1 : 0);
+-		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
+-		if (!lowcore_ptr[i] || !stack)
+-			panic("smp_boot_cpus failed to allocate memory\n");
+-
+-		*(lowcore_ptr[i]) = S390_lowcore;
+-		lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE;
+-		stack = __get_free_pages(GFP_KERNEL, 0);
+-		if (!stack)
+-			panic("smp_boot_cpus failed to allocate memory\n");
+-		lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE;
+-#ifndef CONFIG_64BIT
+-		if (MACHINE_HAS_IEEE) {
+-			lowcore_ptr[i]->extended_save_area_addr =
+-				(__u32) __get_free_pages(GFP_KERNEL, 0);
+-			if (!lowcore_ptr[i]->extended_save_area_addr)
+-				panic("smp_boot_cpus failed to "
+-				      "allocate memory\n");
+-		}
+-#endif
+-	}
+ #ifndef CONFIG_64BIT
+ 	if (MACHINE_HAS_IEEE)
+ 		ctl_set_bit(14, 29); /* enable extended save area */
+@@ -683,15 +824,17 @@ void __init smp_prepare_boot_cpu(void)
+ {
+ 	BUG_ON(smp_processor_id() != 0);
+ 
++	current_thread_info()->cpu = 0;
++	cpu_set(0, cpu_present_map);
+ 	cpu_set(0, cpu_online_map);
+ 	S390_lowcore.percpu_offset = __per_cpu_offset[0];
+ 	current_set[0] = current;
++	smp_cpu_state[0] = CPU_STATE_CONFIGURED;
+ 	spin_lock_init(&(&__get_cpu_var(s390_idle))->lock);
+ }
+ 
+ void __init smp_cpus_done(unsigned int max_cpus)
+ {
+-	cpu_present_map = cpu_possible_map;
+ }
+ 
+ /*
+@@ -705,7 +848,79 @@ int setup_profiling_timer(unsigned int multiplier)
+ 	return 0;
+ }
+ 
+-static DEFINE_PER_CPU(struct cpu, cpu_devices);
++#ifdef CONFIG_HOTPLUG_CPU
++static ssize_t cpu_configure_show(struct sys_device *dev, char *buf)
++{
++	ssize_t count;
++
++	mutex_lock(&smp_cpu_state_mutex);
++	count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
++	mutex_unlock(&smp_cpu_state_mutex);
++	return count;
++}
++
++static ssize_t cpu_configure_store(struct sys_device *dev, const char *buf,
++				   size_t count)
++{
++	int cpu = dev->id;
++	int val, rc;
++	char delim;
++
++	if (sscanf(buf, "%d %c", &val, &delim) != 1)
++		return -EINVAL;
++	if (val != 0 && val != 1)
++		return -EINVAL;
++
++	mutex_lock(&smp_cpu_state_mutex);
++	get_online_cpus();
++	rc = -EBUSY;
++	if (cpu_online(cpu))
++		goto out;
++	rc = 0;
++	switch (val) {
++	case 0:
++		if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
++			rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
++			if (!rc)
++				smp_cpu_state[cpu] = CPU_STATE_STANDBY;
++		}
++		break;
++	case 1:
++		if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
++			rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
++			if (!rc)
++				smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
++		}
++		break;
++	default:
++		break;
++	}
++out:
++	put_online_cpus();
++	mutex_unlock(&smp_cpu_state_mutex);
++	return rc ? rc : count;
++}
++static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
++#endif /* CONFIG_HOTPLUG_CPU */
++
++static ssize_t show_cpu_address(struct sys_device *dev, char *buf)
++{
++	return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
++}
++static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);
++
++
++static struct attribute *cpu_common_attrs[] = {
++#ifdef CONFIG_HOTPLUG_CPU
++	&attr_configure.attr,
++#endif
++	&attr_address.attr,
++	NULL,
++};
++
++static struct attribute_group cpu_common_attr_group = {
++	.attrs = cpu_common_attrs,
++};
+ 
+ static ssize_t show_capability(struct sys_device *dev, char *buf)
+ {
+@@ -750,15 +965,15 @@ static ssize_t show_idle_time(struct sys_device *dev, char *buf)
+ }
+ static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
+ 
+-static struct attribute *cpu_attrs[] = {
++static struct attribute *cpu_online_attrs[] = {
+ 	&attr_capability.attr,
+ 	&attr_idle_count.attr,
+ 	&attr_idle_time_us.attr,
+ 	NULL,
+ };
+ 
+-static struct attribute_group cpu_attr_group = {
+-	.attrs = cpu_attrs,
++static struct attribute_group cpu_online_attr_group = {
++	.attrs = cpu_online_attrs,
+ };
+ 
+ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
+@@ -778,12 +993,12 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
+ 		idle->idle_time = 0;
+ 		idle->idle_count = 0;
+ 		spin_unlock_irq(&idle->lock);
+-		if (sysfs_create_group(&s->kobj, &cpu_attr_group))
++		if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
+ 			return NOTIFY_BAD;
+ 		break;
+ 	case CPU_DEAD:
+ 	case CPU_DEAD_FROZEN:
+-		sysfs_remove_group(&s->kobj, &cpu_attr_group);
++		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
+ 		break;
+ 	}
+ 	return NOTIFY_OK;
+@@ -793,6 +1008,62 @@ static struct notifier_block __cpuinitdata smp_cpu_nb = {
+ 	.notifier_call = smp_cpu_notify,
+ };
+ 
++static int smp_add_present_cpu(int cpu)
++{
++	struct cpu *c = &per_cpu(cpu_devices, cpu);
++	struct sys_device *s = &c->sysdev;
++	int rc;
++
++	c->hotpluggable = 1;
++	rc = register_cpu(c, cpu);
++	if (rc)
++		goto out;
++	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
++	if (rc)
++		goto out_cpu;
++	if (!cpu_online(cpu))
++		goto out;
++	rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
++	if (!rc)
++		return 0;
++	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
++out_cpu:
++#ifdef CONFIG_HOTPLUG_CPU
++	unregister_cpu(c);
++#endif
++out:
++	return rc;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static ssize_t rescan_store(struct sys_device *dev, const char *buf,
++			    size_t count)
++{
++	cpumask_t newcpus;
++	int cpu;
++	int rc;
++
++	mutex_lock(&smp_cpu_state_mutex);
++	get_online_cpus();
++	newcpus = cpu_present_map;
++	rc = smp_rescan_cpus();
++	if (rc)
++		goto out;
++	cpus_andnot(newcpus, cpu_present_map, newcpus);
++	for_each_cpu_mask(cpu, newcpus) {
++		rc = smp_add_present_cpu(cpu);
++		if (rc)
++			cpu_clear(cpu, cpu_present_map);
++	}
++	rc = 0;
++out:
++	put_online_cpus();
++	mutex_unlock(&smp_cpu_state_mutex);
++	return rc ? rc : count;
++}
++static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store);
++#endif /* CONFIG_HOTPLUG_CPU */
++
+ static int __init topology_init(void)
+ {
+ 	int cpu;
+@@ -800,16 +1071,14 @@ static int __init topology_init(void)
+ 
+ 	register_cpu_notifier(&smp_cpu_nb);
+ 
+-	for_each_possible_cpu(cpu) {
+-		struct cpu *c = &per_cpu(cpu_devices, cpu);
+-		struct sys_device *s = &c->sysdev;
+-
+-		c->hotpluggable = 1;
+-		register_cpu(c, cpu);
+-		if (!cpu_online(cpu))
+-			continue;
+-		s = &c->sysdev;
+-		rc = sysfs_create_group(&s->kobj, &cpu_attr_group);
++#ifdef CONFIG_HOTPLUG_CPU
++	rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
++			       &attr_rescan.attr);
++	if (rc)
++		return rc;
++#endif
++	for_each_present_cpu(cpu) {
++		rc = smp_add_present_cpu(cpu);
+ 		if (rc)
+ 			return rc;
+ 	}
+diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
+index 22b800c..3bbac12 100644
+--- a/arch/s390/kernel/time.c
++++ b/arch/s390/kernel/time.c
+@@ -1145,7 +1145,7 @@ static void etr_work_fn(struct work_struct *work)
+  * Sysfs interface functions
+  */
+ static struct sysdev_class etr_sysclass = {
+-	set_kset_name("etr")
++	.name	= "etr",
+ };
+ 
+ static struct sys_device etr_port0_dev = {
+diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
+index 8ed16a8..52b8342 100644
+--- a/arch/s390/kernel/traps.c
++++ b/arch/s390/kernel/traps.c
+@@ -31,6 +31,7 @@
+ #include <linux/reboot.h>
+ #include <linux/kprobes.h>
+ #include <linux/bug.h>
++#include <linux/utsname.h>
+ #include <asm/system.h>
+ #include <asm/uaccess.h>
+ #include <asm/io.h>
+@@ -168,9 +169,16 @@ void show_stack(struct task_struct *task, unsigned long *sp)
+  */
+ void dump_stack(void)
+ {
++	printk("CPU: %d %s %s %.*s\n",
++	       task_thread_info(current)->cpu, print_tainted(),
++	       init_utsname()->release,
++	       (int)strcspn(init_utsname()->version, " "),
++	       init_utsname()->version);
++	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
++	       current->comm, current->pid, current,
++	       (void *) current->thread.ksp);
+ 	show_stack(NULL, NULL);
+ }
+-
+ EXPORT_SYMBOL(dump_stack);
+ 
+ static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
+@@ -258,8 +266,14 @@ void die(const char * str, struct pt_regs * regs, long err)
+ 	console_verbose();
+ 	spin_lock_irq(&die_lock);
+ 	bust_spinlocks(1);
+-	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
+-	print_modules();
++	printk("%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
++#ifdef CONFIG_PREEMPT
++	printk("PREEMPT ");
++#endif
++#ifdef CONFIG_SMP
++	printk("SMP");
++#endif
++	printk("\n");
+ 	notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
+ 	show_regs(regs);
+ 	bust_spinlocks(0);
+diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
+index 849120e..9361591 100644
+--- a/arch/s390/kernel/vmlinux.lds.S
++++ b/arch/s390/kernel/vmlinux.lds.S
+@@ -17,6 +17,12 @@ ENTRY(_start)
+ jiffies = jiffies_64;
+ #endif
+ 
++PHDRS {
++	text PT_LOAD FLAGS(5);	/* R_E */
++	data PT_LOAD FLAGS(7);	/* RWE */
++	note PT_NOTE FLAGS(0);	/* ___ */
++}
++
+ SECTIONS
+ {
+ 	. = 0x00000000;
+@@ -33,6 +39,9 @@ SECTIONS
+ 
+ 	_etext = .;		/* End of text section */
+ 
++	NOTES :text :note
++	BUG_TABLE :text
++
+ 	RODATA
+ 
+ #ifdef CONFIG_SHARED_KERNEL
+@@ -49,9 +58,6 @@ SECTIONS
+ 		__stop___ex_table = .;
+ 	}
+ 
+-	NOTES
+-	BUG_TABLE
+-
+ 	.data : {		/* Data */
+ 		DATA_DATA
+ 		CONSTRUCTORS
+diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
+index 8d76403..e41f400 100644
+--- a/arch/s390/lib/spinlock.c
++++ b/arch/s390/lib/spinlock.c
+@@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu)
+ 		_raw_yield();
+ }
+ 
+-void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc)
++void _raw_spin_lock_wait(raw_spinlock_t *lp)
+ {
+ 	int count = spin_retry;
+ 	unsigned int cpu = ~smp_processor_id();
+@@ -53,15 +53,36 @@ void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc)
+ 		}
+ 		if (__raw_spin_is_locked(lp))
+ 			continue;
+-		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) {
+-			lp->owner_pc = pc;
++		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
+ 			return;
+-		}
+ 	}
+ }
+ EXPORT_SYMBOL(_raw_spin_lock_wait);
+ 
+-int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc)
++void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
++{
++	int count = spin_retry;
++	unsigned int cpu = ~smp_processor_id();
++
++	local_irq_restore(flags);
++	while (1) {
++		if (count-- <= 0) {
++			unsigned int owner = lp->owner_cpu;
++			if (owner != 0)
++				_raw_yield_cpu(~owner);
++			count = spin_retry;
++		}
++		if (__raw_spin_is_locked(lp))
++			continue;
++		local_irq_disable();
++		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
++			return;
++		local_irq_restore(flags);
++	}
++}
++EXPORT_SYMBOL(_raw_spin_lock_wait_flags);
++
++int _raw_spin_trylock_retry(raw_spinlock_t *lp)
+ {
+ 	unsigned int cpu = ~smp_processor_id();
+ 	int count;
+@@ -69,10 +90,8 @@ int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc)
+ 	for (count = spin_retry; count > 0; count--) {
+ 		if (__raw_spin_is_locked(lp))
+ 			continue;
+-		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) {
+-			lp->owner_pc = pc;
++		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
+ 			return 1;
+-		}
+ 	}
+ 	return 0;
+ }
+diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
+index 394980b..880b0eb 100644
+--- a/arch/s390/mm/extmem.c
++++ b/arch/s390/mm/extmem.c
+@@ -83,7 +83,7 @@ struct dcss_segment {
+ };
+ 
+ static DEFINE_MUTEX(dcss_lock);
+-static struct list_head dcss_list = LIST_HEAD_INIT(dcss_list);
++static LIST_HEAD(dcss_list);
+ static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC",
+ 					"EW/EN-MIXED" };
+ 
+diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
+index fb9c5a8..79d13a1 100644
+--- a/arch/s390/mm/vmem.c
++++ b/arch/s390/mm/vmem.c
+@@ -15,10 +15,6 @@
+ #include <asm/setup.h>
+ #include <asm/tlbflush.h>
+ 
+-unsigned long vmalloc_end;
+-EXPORT_SYMBOL(vmalloc_end);
+-
+-static struct page *vmem_map;
+ static DEFINE_MUTEX(vmem_mutex);
+ 
+ struct memory_segment {
+@@ -188,8 +184,8 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
+ 	pte_t  pte;
+ 	int ret = -ENOMEM;
+ 
+-	map_start = vmem_map + PFN_DOWN(start);
+-	map_end	= vmem_map + PFN_DOWN(start + size);
++	map_start = VMEM_MAP + PFN_DOWN(start);
++	map_end	= VMEM_MAP + PFN_DOWN(start + size);
+ 
+ 	start_addr = (unsigned long) map_start & PAGE_MASK;
+ 	end_addr = PFN_ALIGN((unsigned long) map_end);
+@@ -240,10 +236,10 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
+ {
+ 	int ret;
+ 
+-	ret = vmem_add_range(start, size);
++	ret = vmem_add_mem_map(start, size);
+ 	if (ret)
+ 		return ret;
+-	return vmem_add_mem_map(start, size);
++	return vmem_add_range(start, size);
+ }
+ 
+ /*
+@@ -254,7 +250,7 @@ static int insert_memory_segment(struct memory_segment *seg)
+ {
+ 	struct memory_segment *tmp;
+ 
+-	if (PFN_DOWN(seg->start + seg->size) > max_pfn ||
++	if (seg->start + seg->size >= VMALLOC_START ||
+ 	    seg->start + seg->size < seg->start)
+ 		return -ERANGE;
+ 
+@@ -357,17 +353,15 @@ out:
+ 
+ /*
+  * map whole physical memory to virtual memory (identity mapping)
++ * we reserve enough space in the vmalloc area for vmemmap to hotplug
++ * additional memory segments.
+  */
+ void __init vmem_map_init(void)
+ {
+-	unsigned long map_size;
+ 	int i;
+ 
+-	map_size = ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * sizeof(struct page);
+-	vmalloc_end = PFN_ALIGN(VMALLOC_END_INIT) - PFN_ALIGN(map_size);
+-	vmem_map = (struct page *) vmalloc_end;
+-	NODE_DATA(0)->node_mem_map = vmem_map;
+-
++	BUILD_BUG_ON((unsigned long)VMEM_MAP + VMEM_MAP_SIZE > VMEM_MAP_MAX);
++	NODE_DATA(0)->node_mem_map = VMEM_MAP;
+ 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
+ 		vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
+ }
+@@ -382,7 +376,7 @@ static int __init vmem_convert_memory_chunk(void)
+ 	int i;
+ 
+ 	mutex_lock(&vmem_mutex);
+-	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
++	for (i = 0; i < MEMORY_CHUNKS; i++) {
+ 		if (!memory_chunk[i].size)
+ 			continue;
+ 		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
+diff --git a/arch/sh/drivers/dma/dma-sysfs.c b/arch/sh/drivers/dma/dma-sysfs.c
+index eebcd47..51b57c0 100644
+--- a/arch/sh/drivers/dma/dma-sysfs.c
++++ b/arch/sh/drivers/dma/dma-sysfs.c
+@@ -19,7 +19,7 @@
+ #include <asm/dma.h>
+ 
+ static struct sysdev_class dma_sysclass = {
+-	set_kset_name("dma"),
++	.name = "dma",
+ };
+ EXPORT_SYMBOL(dma_sysclass);
+ 
+diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
+index b22a78c..3008c00 100644
+--- a/arch/sh/kernel/cpu/sh4/sq.c
++++ b/arch/sh/kernel/cpu/sh4/sq.c
+@@ -341,17 +341,18 @@ static int __devinit sq_sysdev_add(struct sys_device *sysdev)
+ {
+ 	unsigned int cpu = sysdev->id;
+ 	struct kobject *kobj;
++	int error;
+ 
+ 	sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
+ 	if (unlikely(!sq_kobject[cpu]))
+ 		return -ENOMEM;
+ 
+ 	kobj = sq_kobject[cpu];
+-	kobj->parent = &sysdev->kobj;
+-	kobject_set_name(kobj, "%s", "sq");
+-	kobj->ktype = &ktype_percpu_entry;
+-
+-	return kobject_register(kobj);
++	error = kobject_init_and_add(kobj, &ktype_percpu_entry, &sysdev->kobj,
++				     "%s", "sq");
++	if (!error)
++		kobject_uevent(kobj, KOBJ_ADD);
++	return error;
+ }
+ 
+ static int __devexit sq_sysdev_remove(struct sys_device *sysdev)
+@@ -359,7 +360,7 @@ static int __devexit sq_sysdev_remove(struct sys_device *sysdev)
+ 	unsigned int cpu = sysdev->id;
+ 	struct kobject *kobj = sq_kobject[cpu];
+ 
+-	kobject_unregister(kobj);
++	kobject_put(kobj);
+ 	return 0;
+ }
+ 
+diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
+index a3a67d1..2bc04bf 100644
+--- a/arch/sh/kernel/time.c
++++ b/arch/sh/kernel/time.c
+@@ -174,7 +174,7 @@ int timer_resume(struct sys_device *dev)
+ #endif
+ 
+ static struct sysdev_class timer_sysclass = {
+-	set_kset_name("timer"),
++	.name	 = "timer",
+ 	.suspend = timer_suspend,
+ 	.resume	 = timer_resume,
+ };
+diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
+index 46bb609..3874c2d 100644
+--- a/arch/x86/crypto/Makefile
++++ b/arch/x86/crypto/Makefile
+@@ -4,12 +4,16 @@
+ 
+ obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
+ obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
++obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o
+ 
+ obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
+ obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
++obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o
+ 
+-aes-i586-y := aes-i586-asm_32.o aes_32.o
+-twofish-i586-y := twofish-i586-asm_32.o twofish_32.o
++aes-i586-y := aes-i586-asm_32.o aes_glue.o
++twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o
++salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o
+ 
+-aes-x86_64-y := aes-x86_64-asm_64.o aes_64.o
+-twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_64.o
++aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o
++twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
++salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o
+diff --git a/arch/x86/crypto/aes-i586-asm_32.S b/arch/x86/crypto/aes-i586-asm_32.S
+index f942f0c..1093bed 100644
+--- a/arch/x86/crypto/aes-i586-asm_32.S
++++ b/arch/x86/crypto/aes-i586-asm_32.S
+@@ -46,9 +46,9 @@
+ #define in_blk 16
+ 
+ /* offsets in crypto_tfm structure */
+-#define ekey (crypto_tfm_ctx_offset + 0)
+-#define nrnd (crypto_tfm_ctx_offset + 256)
+-#define dkey (crypto_tfm_ctx_offset + 260)
++#define klen (crypto_tfm_ctx_offset + 0)
++#define ekey (crypto_tfm_ctx_offset + 4)
++#define dkey (crypto_tfm_ctx_offset + 244)
+ 
+ // register mapping for encrypt and decrypt subroutines
+ 
+@@ -221,8 +221,8 @@
+ 
+ .global  aes_enc_blk
+ 
+-.extern  ft_tab
+-.extern  fl_tab
++.extern  crypto_ft_tab
++.extern  crypto_fl_tab
+ 
+ .align 4
+ 
+@@ -236,7 +236,7 @@ aes_enc_blk:
+ 1:	push    %ebx
+ 	mov     in_blk+4(%esp),%r2
+ 	push    %esi
+-	mov     nrnd(%ebp),%r3   // number of rounds
++	mov     klen(%ebp),%r3   // key size
+ 	push    %edi
+ #if ekey != 0
+ 	lea     ekey(%ebp),%ebp  // key pointer
+@@ -255,26 +255,26 @@ aes_enc_blk:
+ 
+ 	sub     $8,%esp		// space for register saves on stack
+ 	add     $16,%ebp	// increment to next round key
+-	cmp     $12,%r3
++	cmp     $24,%r3
+ 	jb      4f		// 10 rounds for 128-bit key
+ 	lea     32(%ebp),%ebp
+ 	je      3f		// 12 rounds for 192-bit key
+ 	lea     32(%ebp),%ebp
+ 
+-2:	fwd_rnd1( -64(%ebp) ,ft_tab)	// 14 rounds for 256-bit key
+-	fwd_rnd2( -48(%ebp) ,ft_tab)
+-3:	fwd_rnd1( -32(%ebp) ,ft_tab)	// 12 rounds for 192-bit key
+-	fwd_rnd2( -16(%ebp) ,ft_tab)
+-4:	fwd_rnd1(    (%ebp) ,ft_tab)	// 10 rounds for 128-bit key
+-	fwd_rnd2( +16(%ebp) ,ft_tab)
+-	fwd_rnd1( +32(%ebp) ,ft_tab)
+-	fwd_rnd2( +48(%ebp) ,ft_tab)
+-	fwd_rnd1( +64(%ebp) ,ft_tab)
+-	fwd_rnd2( +80(%ebp) ,ft_tab)
+-	fwd_rnd1( +96(%ebp) ,ft_tab)
+-	fwd_rnd2(+112(%ebp) ,ft_tab)
+-	fwd_rnd1(+128(%ebp) ,ft_tab)
+-	fwd_rnd2(+144(%ebp) ,fl_tab)	// last round uses a different table
++2:	fwd_rnd1( -64(%ebp), crypto_ft_tab)	// 14 rounds for 256-bit key
++	fwd_rnd2( -48(%ebp), crypto_ft_tab)
++3:	fwd_rnd1( -32(%ebp), crypto_ft_tab)	// 12 rounds for 192-bit key
++	fwd_rnd2( -16(%ebp), crypto_ft_tab)
++4:	fwd_rnd1(    (%ebp), crypto_ft_tab)	// 10 rounds for 128-bit key
++	fwd_rnd2( +16(%ebp), crypto_ft_tab)
++	fwd_rnd1( +32(%ebp), crypto_ft_tab)
++	fwd_rnd2( +48(%ebp), crypto_ft_tab)
++	fwd_rnd1( +64(%ebp), crypto_ft_tab)
++	fwd_rnd2( +80(%ebp), crypto_ft_tab)
++	fwd_rnd1( +96(%ebp), crypto_ft_tab)
++	fwd_rnd2(+112(%ebp), crypto_ft_tab)
++	fwd_rnd1(+128(%ebp), crypto_ft_tab)
++	fwd_rnd2(+144(%ebp), crypto_fl_tab)	// last round uses a different table
+ 
+ // move final values to the output array.  CAUTION: the 
+ // order of these assigns rely on the register mappings
+@@ -297,8 +297,8 @@ aes_enc_blk:
+ 
+ .global  aes_dec_blk
+ 
+-.extern  it_tab
+-.extern  il_tab
++.extern  crypto_it_tab
++.extern  crypto_il_tab
+ 
+ .align 4
+ 
+@@ -312,14 +312,11 @@ aes_dec_blk:
+ 1:	push    %ebx
+ 	mov     in_blk+4(%esp),%r2
+ 	push    %esi
+-	mov     nrnd(%ebp),%r3   // number of rounds
++	mov     klen(%ebp),%r3   // key size
+ 	push    %edi
+ #if dkey != 0
+ 	lea     dkey(%ebp),%ebp  // key pointer
+ #endif
+-	mov     %r3,%r0
+-	shl     $4,%r0
+-	add     %r0,%ebp
+ 	
+ // input four columns and xor in first round key
+ 
+@@ -333,27 +330,27 @@ aes_dec_blk:
+ 	xor     12(%ebp),%r5
+ 
+ 	sub     $8,%esp		// space for register saves on stack
+-	sub     $16,%ebp	// increment to next round key
+-	cmp     $12,%r3
++	add     $16,%ebp	// increment to next round key
++	cmp     $24,%r3
+ 	jb      4f		// 10 rounds for 128-bit key
+-	lea     -32(%ebp),%ebp
++	lea     32(%ebp),%ebp
+ 	je      3f		// 12 rounds for 192-bit key
+-	lea     -32(%ebp),%ebp
+-
+-2:	inv_rnd1( +64(%ebp), it_tab)	// 14 rounds for 256-bit key
+-	inv_rnd2( +48(%ebp), it_tab)
+-3:	inv_rnd1( +32(%ebp), it_tab)	// 12 rounds for 192-bit key
+-	inv_rnd2( +16(%ebp), it_tab)
+-4:	inv_rnd1(    (%ebp), it_tab)	// 10 rounds for 128-bit key
+-	inv_rnd2( -16(%ebp), it_tab)
+-	inv_rnd1( -32(%ebp), it_tab)
+-	inv_rnd2( -48(%ebp), it_tab)
+-	inv_rnd1( -64(%ebp), it_tab)
+-	inv_rnd2( -80(%ebp), it_tab)
+-	inv_rnd1( -96(%ebp), it_tab)
+-	inv_rnd2(-112(%ebp), it_tab)
+-	inv_rnd1(-128(%ebp), it_tab)
+-	inv_rnd2(-144(%ebp), il_tab)	// last round uses a different table
++	lea     32(%ebp),%ebp
++
++2:	inv_rnd1( -64(%ebp), crypto_it_tab)	// 14 rounds for 256-bit key
++	inv_rnd2( -48(%ebp), crypto_it_tab)
++3:	inv_rnd1( -32(%ebp), crypto_it_tab)	// 12 rounds for 192-bit key
++	inv_rnd2( -16(%ebp), crypto_it_tab)
++4:	inv_rnd1(    (%ebp), crypto_it_tab)	// 10 rounds for 128-bit key
++	inv_rnd2( +16(%ebp), crypto_it_tab)
++	inv_rnd1( +32(%ebp), crypto_it_tab)
++	inv_rnd2( +48(%ebp), crypto_it_tab)
++	inv_rnd1( +64(%ebp), crypto_it_tab)
++	inv_rnd2( +80(%ebp), crypto_it_tab)
++	inv_rnd1( +96(%ebp), crypto_it_tab)
++	inv_rnd2(+112(%ebp), crypto_it_tab)
++	inv_rnd1(+128(%ebp), crypto_it_tab)
++	inv_rnd2(+144(%ebp), crypto_il_tab)	// last round uses a different table
+ 
+ // move final values to the output array.  CAUTION: the 
+ // order of these assigns rely on the register mappings
+diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
+index 26b40de..a120f52 100644
+--- a/arch/x86/crypto/aes-x86_64-asm_64.S
++++ b/arch/x86/crypto/aes-x86_64-asm_64.S
+@@ -8,10 +8,10 @@
+  * including this sentence is retained in full.
+  */
+ 
+-.extern aes_ft_tab
+-.extern aes_it_tab
+-.extern aes_fl_tab
+-.extern aes_il_tab
++.extern crypto_ft_tab
++.extern crypto_it_tab
++.extern crypto_fl_tab
++.extern crypto_il_tab
+ 
+ .text
+ 
+@@ -56,13 +56,13 @@
  	.align	8;			\
  FUNC:	movq	r1,r2;			\
  	movq	r3,r4;			\
@@ -49188,13 +51712,83 @@
 +	kobject_put(&dev->kobj);
  }
 diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
-index ddd3a25..8a70a9e 100644
+index ddd3a25..6b658d8 100644
 --- a/drivers/crypto/Kconfig
 +++ b/drivers/crypto/Kconfig
-@@ -83,4 +83,21 @@ config ZCRYPT_MONOLITHIC
+@@ -48,8 +48,6 @@ config CRYPTO_DEV_PADLOCK_SHA
+ 	  If unsure say M. The compiled module will be
+ 	  called padlock-sha.ko
+ 
+-source "arch/s390/crypto/Kconfig"
+-
+ config CRYPTO_DEV_GEODE
+ 	tristate "Support for the Geode LX AES engine"
+ 	depends on X86_32 && PCI
+@@ -83,4 +81,82 @@ config ZCRYPT_MONOLITHIC
  	  that contains all parts of the crypto device driver (ap bus,
  	  request router and all the card drivers).
  
++config CRYPTO_SHA1_S390
++	tristate "SHA1 digest algorithm"
++	depends on S390
++	select CRYPTO_ALGAPI
++	help
++	  This is the s390 hardware accelerated implementation of the
++	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
++
++config CRYPTO_SHA256_S390
++	tristate "SHA256 digest algorithm"
++	depends on S390
++	select CRYPTO_ALGAPI
++	help
++	  This is the s390 hardware accelerated implementation of the
++	  SHA256 secure hash standard (DFIPS 180-2).
++
++	  This version of SHA implements a 256 bit hash with 128 bits of
++	  security against collision attacks.
++
++config CRYPTO_DES_S390
++	tristate "DES and Triple DES cipher algorithms"
++	depends on S390
++	select CRYPTO_ALGAPI
++	select CRYPTO_BLKCIPHER
++	help
++	  This us the s390 hardware accelerated implementation of the
++	  DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
++
++config CRYPTO_AES_S390
++	tristate "AES cipher algorithms"
++	depends on S390
++	select CRYPTO_ALGAPI
++	select CRYPTO_BLKCIPHER
++	help
++	  This is the s390 hardware accelerated implementation of the
++	  AES cipher algorithms (FIPS-197). AES uses the Rijndael
++	  algorithm.
++
++	  Rijndael appears to be consistently a very good performer in
++	  both hardware and software across a wide range of computing
++	  environments regardless of its use in feedback or non-feedback
++	  modes. Its key setup time is excellent, and its key agility is
++	  good. Rijndael's very low memory requirements make it very well
++	  suited for restricted-space environments, in which it also
++	  demonstrates excellent performance. Rijndael's operations are
++	  among the easiest to defend against power and timing attacks.
++
++	  On s390 the System z9-109 currently only supports the key size
++	  of 128 bit.
++
++config S390_PRNG
++	tristate "Pseudo random number generator device driver"
++	depends on S390
++	default "m"
++	help
++	  Select this option if you want to use the s390 pseudo random number
++	  generator. The PRNG is part of the cryptographic processor functions
++	  and uses triple-DES to generate secure random numbers like the
++	  ANSI X9.17 standard. The PRNG is usable via the char device
++	  /dev/prandom.
++
 +config CRYPTO_DEV_HIFN_795X
 +	tristate "Driver HIFN 795x crypto accelerator chips"
 +	select CRYPTO_DES
@@ -53596,28 +56190,61 @@
  
  /* NOTE:  some of this ISP1301 setup is specific to H2 boards;
 diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
-index fb06555..ee01e27 100644
+index fb06555..64df55e 100644
 --- a/drivers/ide/Kconfig
 +++ b/drivers/ide/Kconfig
-@@ -374,17 +374,6 @@ comment "PCI IDE chipsets support"
+@@ -325,7 +325,7 @@ config BLK_DEV_PLATFORM
+ 	  If unsure, say N.
+ 
+ config BLK_DEV_CMD640
+-	bool "CMD640 chipset bugfix/support"
++	tristate "CMD640 chipset bugfix/support"
+ 	depends on X86
+ 	---help---
+ 	  The CMD-Technologies CMD640 IDE chip is used on many common 486 and
+@@ -359,9 +359,8 @@ config BLK_DEV_CMD640_ENHANCED
+ 	  Otherwise say N.
+ 
+ config BLK_DEV_IDEPNP
+-	bool "PNP EIDE support"
++	tristate "PNP EIDE support"
+ 	depends on PNP
+-	select IDE_GENERIC
+ 	help
+ 	  If you have a PnP (Plug and Play) compatible EIDE card and
+ 	  would like the kernel to automatically detect and activate
+@@ -374,19 +373,20 @@ comment "PCI IDE chipsets support"
  config BLK_DEV_IDEPCI
  	bool
  
 -config IDEPCI_SHARE_IRQ
 -	bool "Sharing PCI IDE interrupts support"
 -	depends on BLK_DEV_IDEPCI
--	help
++config IDEPCI_PCIBUS_ORDER
++	bool "Probe IDE PCI devices in the PCI bus order (DEPRECATED)"
++	depends on BLK_DEV_IDE=y && BLK_DEV_IDEPCI
++	default y
+ 	help
 -	  Some ATA/IDE chipsets have hardware support which allows for
 -	  sharing a single IRQ with other cards. To enable support for
 -	  this in the ATA/IDE driver, say Y here.
--
++	  Probe IDE PCI devices in the order in which they appear on the
++	  PCI bus (i.e. 00:1f.1 PCI device before 02:01.0 PCI device)
++	  instead of the order in which IDE PCI host drivers are loaded.
+ 
 -	  It is safe to say Y to this question, in most cases.
 -	  If unsure, say N.
--
- config IDEPCI_PCIBUS_ORDER
- 	def_bool BLK_DEV_IDE=y && BLK_DEV_IDEPCI
- 
-@@ -707,7 +696,6 @@ config BLK_DEV_SVWKS
++	  Please note that this method of assuring stable naming of
++	  IDE devices is unreliable and use other means for achieving
++	  it (i.e. udev).
+ 
+-config IDEPCI_PCIBUS_ORDER
+-	def_bool BLK_DEV_IDE=y && BLK_DEV_IDEPCI
++	  If in doubt, say N.
+ 
+ # TODO: split it on per host driver config options (or module parameters)
+ config BLK_DEV_OFFBOARD
+@@ -707,7 +707,6 @@ config BLK_DEV_SVWKS
  config BLK_DEV_SGIIOC4
  	tristate "Silicon Graphics IOC4 chipset ATA/ATAPI support"
  	depends on (IA64_SGI_SN2 || IA64_GENERIC) && SGI_IOC4
@@ -53625,8 +56252,228 @@
  	select BLK_DEV_IDEDMA_PCI
  	help
  	  This driver adds PIO & MultiMode DMA-2 support for the SGI IOC4
+@@ -801,7 +800,7 @@ config BLK_DEV_CELLEB
+ endif
+ 
+ config BLK_DEV_IDE_PMAC
+-	bool "Builtin PowerMac IDE support"
++	tristate "Builtin PowerMac IDE support"
+ 	depends on PPC_PMAC && IDE=y && BLK_DEV_IDE=y
+ 	help
+ 	  This driver provides support for the built-in IDE controller on
+@@ -855,8 +854,9 @@ config BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ
+        depends on BLK_DEV_IDE_AU1XXX
+ 
+ config IDE_ARM
+-	def_bool ARM && (ARCH_CLPS7500 || ARCH_RPC || ARCH_SHARK)
+-	select IDE_GENERIC
++	tristate "ARM IDE support"
++	depends on ARM && (ARCH_CLPS7500 || ARCH_RPC || ARCH_SHARK)
++	default y
+ 
+ config BLK_DEV_IDE_ICSIDE
+ 	tristate "ICS IDE interface support"
+@@ -888,10 +888,9 @@ config BLK_DEV_IDE_BAST
+ 	  Simtec BAST or the Thorcom VR1000
+ 
+ config ETRAX_IDE
+-	bool "ETRAX IDE support"
++	tristate "ETRAX IDE support"
+ 	depends on CRIS && BROKEN
+ 	select BLK_DEV_IDEDMA
+-	select IDE_GENERIC
+ 	help
+ 	  Enables the ETRAX IDE driver.
+ 
+@@ -923,17 +922,15 @@ config ETRAX_IDE_G27_RESET
+ endchoice
+ 
+ config IDE_H8300
+-	bool "H8300 IDE support"
++	tristate "H8300 IDE support"
+ 	depends on H8300
+-	select IDE_GENERIC
+ 	default y
+ 	help
+ 	  Enables the H8300 IDE driver.
+ 
+ config BLK_DEV_GAYLE
+-	bool "Amiga Gayle IDE interface support"
++	tristate "Amiga Gayle IDE interface support"
+ 	depends on AMIGA
+-	select IDE_GENERIC
+ 	help
+ 	  This is the IDE driver for the Amiga Gayle IDE interface. It supports
+ 	  both the `A1200 style' and `A4000 style' of the Gayle IDE interface,
+@@ -963,9 +960,8 @@ config BLK_DEV_IDEDOUBLER
+ 	  runtime using the "ide=doubler" kernel boot parameter.
+ 
+ config BLK_DEV_BUDDHA
+-	bool "Buddha/Catweasel/X-Surf IDE interface support (EXPERIMENTAL)"
++	tristate "Buddha/Catweasel/X-Surf IDE interface support (EXPERIMENTAL)"
+ 	depends on ZORRO && EXPERIMENTAL
+-	select IDE_GENERIC
+ 	help
+ 	  This is the IDE driver for the IDE interfaces on the Buddha, 
+ 	  Catweasel and X-Surf expansion boards.  It supports up to two interfaces 
+@@ -976,9 +972,8 @@ config BLK_DEV_BUDDHA
+ 	  to one of its IDE interfaces.
+ 
+ config BLK_DEV_FALCON_IDE
+-	bool "Falcon IDE interface support"
++	tristate "Falcon IDE interface support"
+ 	depends on ATARI
+-	select IDE_GENERIC
+ 	help
+ 	  This is the IDE driver for the builtin IDE interface on the Atari
+ 	  Falcon. Say Y if you have a Falcon and want to use IDE devices (hard
+@@ -986,9 +981,8 @@ config BLK_DEV_FALCON_IDE
+ 	  interface.
+ 
+ config BLK_DEV_MAC_IDE
+-	bool "Macintosh Quadra/Powerbook IDE interface support"
++	tristate "Macintosh Quadra/Powerbook IDE interface support"
+ 	depends on MAC
+-	select IDE_GENERIC
+ 	help
+ 	  This is the IDE driver for the builtin IDE interface on some m68k
+ 	  Macintosh models. It supports both the `Quadra style' (used in
+@@ -1000,18 +994,16 @@ config BLK_DEV_MAC_IDE
+ 	  builtin IDE interface.
+ 
+ config BLK_DEV_Q40IDE
+-	bool "Q40/Q60 IDE interface support"
++	tristate "Q40/Q60 IDE interface support"
+ 	depends on Q40
+-	select IDE_GENERIC
+ 	help
+ 	  Enable the on-board IDE controller in the Q40/Q60.  This should
+ 	  normally be on; disable it only if you are running a custom hard
+ 	  drive subsystem through an expansion card.
+ 
+ config BLK_DEV_MPC8xx_IDE
+-	bool "MPC8xx IDE support"
++	tristate "MPC8xx IDE support"
+ 	depends on 8xx && (LWMON || IVMS8 || IVML24 || TQM8xxL) && IDE=y && BLK_DEV_IDE=y && !PPC_MERGE
+-	select IDE_GENERIC
+ 	help
+ 	  This option provides support for IDE on Motorola MPC8xx Systems.
+ 	  Please see 'Type of MPC8xx IDE interface' for details.
+diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
+index b181fc6..0d2da89 100644
+--- a/drivers/ide/Makefile
++++ b/drivers/ide/Makefile
+@@ -7,41 +7,37 @@
+ # Note : at this point, these files are compiled on all systems.
+ # In the future, some of these should be built conditionally.
+ #
+-# First come modules that register themselves with the core
++# link order is important here
+ 
+ EXTRA_CFLAGS				+= -Idrivers/ide
+ 
+-obj-$(CONFIG_BLK_DEV_IDE)		+= pci/
+-
+ ide-core-y += ide.o ide-io.o ide-iops.o ide-lib.o ide-probe.o ide-taskfile.o
+ 
+-ide-core-$(CONFIG_BLK_DEV_CMD640)	+= pci/cmd640.o
+-
+-# Core IDE code - must come before legacy
++# core IDE code
+ ide-core-$(CONFIG_BLK_DEV_IDEPCI)	+= setup-pci.o
+ ide-core-$(CONFIG_BLK_DEV_IDEDMA)	+= ide-dma.o
+ ide-core-$(CONFIG_IDE_PROC_FS)		+= ide-proc.o
+-ide-core-$(CONFIG_BLK_DEV_IDEPNP)	+= ide-pnp.o
+ ide-core-$(CONFIG_BLK_DEV_IDEACPI)	+= ide-acpi.o
+ 
+-# built-in only drivers from arm/
+-ide-core-$(CONFIG_IDE_ARM)		+= arm/ide_arm.o
++obj-$(CONFIG_BLK_DEV_IDE)		+= ide-core.o
+ 
+-# built-in only drivers from legacy/
+-ide-core-$(CONFIG_BLK_DEV_BUDDHA)	+= legacy/buddha.o
+-ide-core-$(CONFIG_BLK_DEV_FALCON_IDE)	+= legacy/falconide.o
+-ide-core-$(CONFIG_BLK_DEV_GAYLE)	+= legacy/gayle.o
+-ide-core-$(CONFIG_BLK_DEV_MAC_IDE)	+= legacy/macide.o
+-ide-core-$(CONFIG_BLK_DEV_Q40IDE)	+= legacy/q40ide.o
++ifeq ($(CONFIG_IDE_ARM), y)
++	ide-arm-core-y += arm/ide_arm.o
++	obj-y += ide-arm-core.o
++endif
+ 
+-# built-in only drivers from ppc/
+-ide-core-$(CONFIG_BLK_DEV_MPC8xx_IDE)	+= ppc/mpc8xx.o
+-ide-core-$(CONFIG_BLK_DEV_IDE_PMAC)	+= ppc/pmac.o
++obj-$(CONFIG_BLK_DEV_IDE)		+= legacy/ pci/
+ 
+-# built-in only drivers from h8300/
+-ide-core-$(CONFIG_IDE_H8300)		+= h8300/ide-h8300.o
++obj-$(CONFIG_IDEPCI_PCIBUS_ORDER)	+= ide-scan-pci.o
+ 
+-obj-$(CONFIG_BLK_DEV_IDE)		+= ide-core.o
++ifeq ($(CONFIG_BLK_DEV_CMD640), y)
++	cmd640-core-y += pci/cmd640.o
++	obj-y += cmd640-core.o
++endif
++
++obj-$(CONFIG_BLK_DEV_IDE)		+= cris/ ppc/
++obj-$(CONFIG_BLK_DEV_IDEPNP)		+= ide-pnp.o
++obj-$(CONFIG_IDE_H8300)			+= h8300/
+ obj-$(CONFIG_IDE_GENERIC)		+= ide-generic.o
+ 
+ obj-$(CONFIG_BLK_DEV_IDEDISK)		+= ide-disk.o
+@@ -49,6 +45,20 @@ obj-$(CONFIG_BLK_DEV_IDECD)		+= ide-cd.o
+ obj-$(CONFIG_BLK_DEV_IDETAPE)		+= ide-tape.o
+ obj-$(CONFIG_BLK_DEV_IDEFLOPPY)		+= ide-floppy.o
+ 
+-obj-$(CONFIG_BLK_DEV_IDE)		+= legacy/ arm/ mips/
+-obj-$(CONFIG_BLK_DEV_HD)		+= legacy/
+-obj-$(CONFIG_ETRAX_IDE)		+= cris/
++ifeq ($(CONFIG_BLK_DEV_IDECS), y)
++	ide-cs-core-y += legacy/ide-cs.o
++	obj-y += ide-cs-core.o
++endif
++
++ifeq ($(CONFIG_BLK_DEV_PLATFORM), y)
++	ide-platform-core-y += legacy/ide_platform.o
++	obj-y += ide-platform-core.o
++endif
++
++obj-$(CONFIG_BLK_DEV_IDE)		+= arm/ mips/
++
++# old hd driver must be last
++ifeq ($(CONFIG_BLK_DEV_HD), y)
++	hd-core-y += legacy/hd.o
++	obj-y += hd-core.o
++endif
+diff --git a/drivers/ide/arm/Makefile b/drivers/ide/arm/Makefile
+index 6a78f07..5f63ad2 100644
+--- a/drivers/ide/arm/Makefile
++++ b/drivers/ide/arm/Makefile
+@@ -3,4 +3,8 @@ obj-$(CONFIG_BLK_DEV_IDE_ICSIDE)	+= icside.o
+ obj-$(CONFIG_BLK_DEV_IDE_RAPIDE)	+= rapide.o
+ obj-$(CONFIG_BLK_DEV_IDE_BAST)		+= bast-ide.o
+ 
++ifeq ($(CONFIG_IDE_ARM), m)
++	obj-m += ide_arm.o
++endif
++
+ EXTRA_CFLAGS	:= -Idrivers/ide
+diff --git a/drivers/ide/arm/bast-ide.c b/drivers/ide/arm/bast-ide.c
+index 48db616..45bf9c8 100644
+--- a/drivers/ide/arm/bast-ide.c
++++ b/drivers/ide/arm/bast-ide.c
+@@ -45,7 +45,7 @@ bastide_register(unsigned int base, unsigned int aux, int irq,
+ 	hw.io_ports[IDE_CONTROL_OFFSET] = aux + (6 * 0x20);
+ 	hw.irq = irq;
+ 
+-	ide_register_hw(&hw, NULL, 0, hwif);
++	ide_register_hw(&hw, NULL, hwif);
+ 
+ 	return 0;
+ }
 diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c
-index 93f71fc..673402f 100644
+index 93f71fc..8a5c720 100644
 --- a/drivers/ide/arm/icside.c
 +++ b/drivers/ide/arm/icside.c
 @@ -272,8 +272,6 @@ static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode)
@@ -53638,11 +56485,169 @@
  	}
  
  	/*
+@@ -289,26 +287,10 @@ static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode)
+ 		ide_xfer_verbose(xfer_mode), 2000 / drive->drive_data);
+ }
+ 
+-static void icside_dma_host_off(ide_drive_t *drive)
++static void icside_dma_host_set(ide_drive_t *drive, int on)
+ {
+ }
+ 
+-static void icside_dma_off_quietly(ide_drive_t *drive)
+-{
+-	drive->using_dma = 0;
+-}
+-
+-static void icside_dma_host_on(ide_drive_t *drive)
+-{
+-}
+-
+-static int icside_dma_on(ide_drive_t *drive)
+-{
+-	drive->using_dma = 1;
+-
+-	return 0;
+-}
+-
+ static int icside_dma_end(ide_drive_t *drive)
+ {
+ 	ide_hwif_t *hwif = HWIF(drive);
+@@ -424,10 +406,7 @@ static void icside_dma_init(ide_hwif_t *hwif)
+ 	hwif->dmatable_dma	= 0;
+ 	hwif->set_dma_mode	= icside_set_dma_mode;
+ 
+-	hwif->dma_host_off	= icside_dma_host_off;
+-	hwif->dma_off_quietly	= icside_dma_off_quietly;
+-	hwif->dma_host_on	= icside_dma_host_on;
+-	hwif->ide_dma_on	= icside_dma_on;
++	hwif->dma_host_set	= icside_dma_host_set;
+ 	hwif->dma_setup		= icside_dma_setup;
+ 	hwif->dma_exec_cmd	= icside_dma_exec_cmd;
+ 	hwif->dma_start		= icside_dma_start;
+diff --git a/drivers/ide/arm/ide_arm.c b/drivers/ide/arm/ide_arm.c
+index 8957cba..60f2497 100644
+--- a/drivers/ide/arm/ide_arm.c
++++ b/drivers/ide/arm/ide_arm.c
+@@ -24,12 +24,25 @@
+ # define IDE_ARM_IRQ	IRQ_HARDDISK
+ #endif
+ 
+-void __init ide_arm_init(void)
++static int __init ide_arm_init(void)
+ {
++	ide_hwif_t *hwif;
+ 	hw_regs_t hw;
++	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ 
+ 	memset(&hw, 0, sizeof(hw));
+ 	ide_std_init_ports(&hw, IDE_ARM_IO, IDE_ARM_IO + 0x206);
+ 	hw.irq = IDE_ARM_IRQ;
+-	ide_register_hw(&hw, NULL, 1, NULL);
++
++	hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
++	if (hwif) {
++		ide_init_port_hw(hwif, &hw);
++		idx[0] = hwif->index;
++
++		ide_device_add(idx);
++	}
++
++	return 0;
+ }
++
++module_init(ide_arm_init);
+diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/arm/rapide.c
+index 0775a3a..e6b56d1 100644
+--- a/drivers/ide/arm/rapide.c
++++ b/drivers/ide/arm/rapide.c
+@@ -13,26 +13,18 @@
+ 
+ #include <asm/ecard.h>
+ 
+-static ide_hwif_t *
+-rapide_locate_hwif(void __iomem *base, void __iomem *ctrl, unsigned int sz, int irq)
++static void rapide_setup_ports(hw_regs_t *hw, void __iomem *base,
++			       void __iomem *ctrl, unsigned int sz, int irq)
+ {
+ 	unsigned long port = (unsigned long)base;
+-	ide_hwif_t *hwif = ide_find_port(port);
+ 	int i;
+ 
+-	if (hwif == NULL)
+-		goto out;
+-
+ 	for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
+-		hwif->io_ports[i] = port;
++		hw->io_ports[i] = port;
+ 		port += sz;
+ 	}
+-	hwif->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl;
+-	hwif->irq = irq;
+-	hwif->mmio = 1;
+-	default_hwif_mmiops(hwif);
+-out:
+-	return hwif;
++	hw->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl;
++	hw->irq = irq;
+ }
+ 
+ static int __devinit
+@@ -42,6 +34,7 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
+ 	void __iomem *base;
+ 	int ret;
+ 	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
++	hw_regs_t hw;
+ 
+ 	ret = ecard_request_resources(ec);
+ 	if (ret)
+@@ -53,11 +46,17 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
+ 		goto release;
+ 	}
+ 
+-	hwif = rapide_locate_hwif(base, base + 0x818, 1 << 6, ec->irq);
++	hwif = ide_find_port((unsigned long)base);
+ 	if (hwif) {
+-		hwif->hwif_data = base;
+-		hwif->gendev.parent = &ec->dev;
+-		hwif->noprobe = 0;
++		memset(&hw, 0, sizeof(hw));
++		rapide_setup_ports(&hw, base, base + 0x818, 1 << 6, ec->irq);
++		hw.chipset = ide_generic;
++		hw.dev = &ec->dev;
++
++		ide_init_port_hw(hwif, &hw);
++
++		hwif->mmio = 1;
++		default_hwif_mmiops(hwif);
+ 
+ 		idx[0] = hwif->index;
+ 
+diff --git a/drivers/ide/cris/Makefile b/drivers/ide/cris/Makefile
+index 6176e8d..20b9596 100644
+--- a/drivers/ide/cris/Makefile
++++ b/drivers/ide/cris/Makefile
+@@ -1,3 +1,3 @@
+ EXTRA_CFLAGS				+= -Idrivers/ide
+ 
+-obj-y					+= ide-cris.o
++obj-$(CONFIG_IDE_ETRAX)			+= ide-cris.o
 diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
-index 476e0d6..325e608 100644
+index 476e0d6..8c3294c 100644
 --- a/drivers/ide/cris/ide-cris.c
 +++ b/drivers/ide/cris/ide-cris.c
-@@ -747,8 +747,6 @@ static void cris_set_dma_mode(ide_drive_t *drive, const u8 speed)
+@@ -673,9 +673,8 @@ static void cris_ide_input_data (ide_drive_t *drive, void *, unsigned int);
+ static void cris_ide_output_data (ide_drive_t *drive, void *, unsigned int);
+ static void cris_atapi_input_bytes(ide_drive_t *drive, void *, unsigned int);
+ static void cris_atapi_output_bytes(ide_drive_t *drive, void *, unsigned int);
+-static int cris_dma_on (ide_drive_t *drive);
+ 
+-static void cris_dma_off(ide_drive_t *drive)
++static void cris_dma_host_set(ide_drive_t *drive, int on)
+ {
+ }
+ 
+@@ -747,8 +746,6 @@ static void cris_set_dma_mode(ide_drive_t *drive, const u8 speed)
  			strobe = ATA_DMA2_STROBE;
  			hold = ATA_DMA2_HOLD;
  			break;
@@ -53651,8 +56656,149 @@
  	}
  
  	if (speed >= XFER_UDMA_0)
+@@ -757,13 +754,11 @@ static void cris_set_dma_mode(ide_drive_t *drive, const u8 speed)
+ 		cris_ide_set_speed(TYPE_DMA, 0, strobe, hold);
+ }
+ 
+-void __init
+-init_e100_ide (void)
++static int __init init_e100_ide(void)
+ {
+ 	hw_regs_t hw;
+-	int ide_offsets[IDE_NR_PORTS];
+-	int h;
+-	int i;
++	int ide_offsets[IDE_NR_PORTS], h, i;
++	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ 
+ 	printk("ide: ETRAX FS built-in ATA DMA controller\n");
+ 
+@@ -780,9 +775,11 @@ init_e100_ide (void)
+ 		                ide_offsets,
+ 		                0, 0, cris_ide_ack_intr,
+ 		                ide_default_irq(0));
+-		ide_register_hw(&hw, NULL, 1, &hwif);
++		hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
+ 		if (hwif == NULL)
+ 			continue;
++		ide_init_port_data(hwif, hwif->index);
++		ide_init_port_hw(hwif, &hw);
+ 		hwif->mmio = 1;
+ 		hwif->chipset = ide_etrax100;
+ 		hwif->set_pio_mode = &cris_set_pio_mode;
+@@ -791,6 +788,7 @@ init_e100_ide (void)
+ 		hwif->ata_output_data = &cris_ide_output_data;
+ 		hwif->atapi_input_bytes = &cris_atapi_input_bytes;
+ 		hwif->atapi_output_bytes = &cris_atapi_output_bytes;
++		hwif->dma_host_set = &cris_dma_host_set;
+ 		hwif->ide_dma_end = &cris_dma_end;
+ 		hwif->dma_setup = &cris_dma_setup;
+ 		hwif->dma_exec_cmd = &cris_dma_exec_cmd;
+@@ -801,9 +799,6 @@ init_e100_ide (void)
+ 		hwif->OUTBSYNC = &cris_ide_outbsync;
+ 		hwif->INB = &cris_ide_inb;
+ 		hwif->INW = &cris_ide_inw;
+-		hwif->dma_host_off = &cris_dma_off;
+-		hwif->dma_host_on = &cris_dma_on;
+-		hwif->dma_off_quietly = &cris_dma_off;
+ 		hwif->cbl = ATA_CBL_PATA40;
+ 		hwif->host_flags |= IDE_HFLAG_NO_ATAPI_DMA;
+ 		hwif->pio_mask = ATA_PIO4,
+@@ -811,6 +806,8 @@ init_e100_ide (void)
+ 		hwif->drives[1].autotune = 1;
+ 		hwif->ultra_mask = cris_ultra_mask;
+ 		hwif->mwdma_mask = 0x07; /* Multiword DMA 0-2 */
++
++		idx[h] = hwif->index;
+ 	}
+ 
+ 	/* Reset pulse */
+@@ -823,14 +820,12 @@ init_e100_ide (void)
+ 	cris_ide_set_speed(TYPE_PIO, ATA_PIO4_SETUP, ATA_PIO4_STROBE, ATA_PIO4_HOLD);
+ 	cris_ide_set_speed(TYPE_DMA, 0, ATA_DMA2_STROBE, ATA_DMA2_HOLD);
+ 	cris_ide_set_speed(TYPE_UDMA, ATA_UDMA2_CYC, ATA_UDMA2_DVS, 0);
+-}
+ 
+-static int cris_dma_on (ide_drive_t *drive)
+-{
++	ide_device_add(idx);
++
+ 	return 0;
+ }
+ 
+-
+ static cris_dma_descr_type mydescr __attribute__ ((__aligned__(16)));
+ 
+ /*
+@@ -1062,3 +1057,5 @@ static void cris_dma_start(ide_drive_t *drive)
+ 		LED_DISK_READ(1);
+ 	}
+ }
++
++module_init(init_e100_ide);
+diff --git a/drivers/ide/h8300/Makefile b/drivers/ide/h8300/Makefile
+new file mode 100644
+index 0000000..5eba16f
+--- /dev/null
++++ b/drivers/ide/h8300/Makefile
+@@ -0,0 +1,2 @@
++
++obj-$(CONFIG_IDE_H8300)			+= ide-h8300.o
+diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/h8300/ide-h8300.c
+index 4a49b5c..4f6d019 100644
+--- a/drivers/ide/h8300/ide-h8300.c
++++ b/drivers/ide/h8300/ide-h8300.c
+@@ -84,11 +84,12 @@ static inline void hwif_setup(ide_hwif_t *hwif)
+ 	hwif->INSL  = NULL;
+ }
+ 
+-void __init h8300_ide_init(void)
++static int __init h8300_ide_init(void)
+ {
+ 	hw_regs_t hw;
+ 	ide_hwif_t *hwif;
+-	int idx;
++	int index;
++	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ 
+ 	if (!request_region(CONFIG_H8300_IDE_BASE, H8300_IDE_GAP*8, "ide-h8300"))
+ 		goto out_busy;
+@@ -100,16 +101,28 @@ void __init h8300_ide_init(void)
+ 	hw_setup(&hw);
+ 
+ 	/* register if */
+-	idx = ide_register_hw(&hw, NULL, 1, &hwif);
+-	if (idx == -1) {
++	hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
++	if (hwif == NULL) {
+ 		printk(KERN_ERR "ide-h8300: IDE I/F register failed\n");
+-		return;
++		return -ENOENT;
+ 	}
+ 
++	index = hwif->index;
++	ide_init_port_data(hwif, index);
++	ide_init_port_hw(hwif, &hw);
+ 	hwif_setup(hwif);
+-	printk(KERN_INFO "ide%d: H8/300 generic IDE interface\n", idx);
+-	return;
++	printk(KERN_INFO "ide%d: H8/300 generic IDE interface\n", index);
++
++	idx[0] = index;
++
++	ide_device_add(idx);
++
++	return 0;
+ 
+ out_busy:
+ 	printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n");
++
++	return -EBUSY;
+ }
++
++module_init(h8300_ide_init);
 diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
-index 899d565..e0bb0cf 100644
+index 899d565..e888fc3 100644
 --- a/drivers/ide/ide-acpi.c
 +++ b/drivers/ide/ide-acpi.c
 @@ -383,27 +383,19 @@ static int taskfile_load_raw(ide_drive_t *drive,
@@ -53672,7 +56818,7 @@
 -	args.tfRegister[6] = gtf->tfa[5];	/* 0x1f6 */
 -	args.tfRegister[7] = gtf->tfa[6];	/* 0x1f7 */
 +	memcpy(&args.tf_array[7], &gtf->tfa, 7);
-+	args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE;
++	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
  
  	if (ide_noacpitfs) {
  		DEBPRINT("_GTF execution disabled\n");
@@ -53715,7 +56861,7 @@
  	if (CDROM_CONFIG_FLAGS (drive)->drq_interrupt) {
  		/* waiting for CDB interrupt, not DMA yet. */
 diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
-index b178190..d8fdd86 100644
+index b178190..717e114 100644
 --- a/drivers/ide/ide-disk.c
 +++ b/drivers/ide/ide-disk.c
 @@ -129,6 +129,50 @@ static int lba_capacity_is_ok (struct hd_driveid *id)
@@ -53785,7 +56931,7 @@
  
  	if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) {
  		if (block + rq->nr_sectors > 1ULL << 28)
-@@ -155,121 +199,76 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
+@@ -155,121 +199,71 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
  		ide_map_sg(drive, rq);
  	}
  
@@ -53795,7 +56941,7 @@
 -	/* FIXME: SELECT_MASK(drive, 0) ? */
 +	memset(&task, 0, sizeof(task));
 +	task.tf_flags = IDE_TFLAG_NO_SELECT_MASK;  /* FIXME? */
-+	task.tf_flags |= (IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE);
++	task.tf_flags |= (IDE_TFLAG_TF | IDE_TFLAG_DEVICE);
  
  	if (drive->select.b.lba) {
  		if (lba48) {
@@ -53824,20 +56970,12 @@
 +				tf->hob_lbam = (u8)((u64)block >> 32);
 +				tf->hob_lbah = (u8)((u64)block >> 40);
  			}
-+
-+			tf->nsect  = nsectors & 0xff;
-+			tf->lbal   = (u8) block;
-+			tf->lbam   = (u8)(block >>  8);
-+			tf->lbah   = (u8)(block >> 16);
- #ifdef DEBUG
- 			printk("%s: 0x%02x%02x 0x%02x%02x%02x%02x%02x%02x\n",
+-#ifdef DEBUG
+-			printk("%s: 0x%02x%02x 0x%02x%02x%02x%02x%02x%02x\n",
 -				drive->name, tasklets[3], tasklets[2],
 -				tasklets[9], tasklets[8], tasklets[7],
 -				tasklets[6], tasklets[5], tasklets[4]);
-+				drive->name, tf->hob_nsect, tf->nsect,
-+				tf->hob_lbah, tf->hob_lbam, tf->hob_lbal,
-+				tf->lbah, tf->lbam, tf->lbal);
- #endif
+-#endif
 -			hwif->OUTB(tasklets[1], IDE_FEATURE_REG);
 -			hwif->OUTB(tasklets[3], IDE_NSECTOR_REG);
 -			hwif->OUTB(tasklets[7], IDE_SECTOR_REG);
@@ -53850,7 +56988,13 @@
 -			hwif->OUTB(tasklets[5], IDE_LCYL_REG);
 -			hwif->OUTB(tasklets[6], IDE_HCYL_REG);
 -			hwif->OUTB(0x00|drive->select.all,IDE_SELECT_REG);
-+			task.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_OUT_HOB);
++
++			tf->nsect  = nsectors & 0xff;
++			tf->lbal   = (u8) block;
++			tf->lbam   = (u8)(block >>  8);
++			tf->lbah   = (u8)(block >> 16);
++
++			task.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB);
  		} else {
 -			hwif->OUTB(0x00, IDE_FEATURE_REG);
 -			hwif->OUTB(nsectors.b.low, IDE_NSECTOR_REG);
@@ -53904,15 +57048,9 @@
 -		/* fallback to PIO */
 -		ide_init_sg_cmd(drive, rq);
 -	}
-+	if (rq_data_dir(rq))
-+		task.tf_flags |= IDE_TFLAG_WRITE;
- 
+-
 -	if (rq_data_dir(rq) == READ) {
-+	ide_tf_set_cmd(drive, &task, dma);
-+	if (!dma)
-+		hwif->data_phase = task.data_phase;
-+	task.rq = rq;
- 
+-
 -		if (drive->mult_count) {
 -			hwif->data_phase = TASKFILE_MULTI_IN;
 -			command = lba48 ? WIN_MULTREAD_EXT : WIN_MULTREAD;
@@ -53920,7 +57058,8 @@
 -			hwif->data_phase = TASKFILE_IN;
 -			command = lba48 ? WIN_READ_EXT : WIN_READ;
 -		}
-+	rc = do_rw_taskfile(drive, &task);
++	if (rq_data_dir(rq))
++		task.tf_flags |= IDE_TFLAG_WRITE;
  
 -		ide_execute_command(drive, command, &task_in_intr, WAIT_CMD, NULL);
 -		return ide_started;
@@ -53932,10 +57071,15 @@
 -			hwif->data_phase = TASKFILE_OUT;
 -			command = lba48 ? WIN_WRITE_EXT : WIN_WRITE;
 -		}
--
++	ide_tf_set_cmd(drive, &task, dma);
++	if (!dma)
++		hwif->data_phase = task.data_phase;
++	task.rq = rq;
+ 
 -		/* FIXME: ->OUTBSYNC ? */
 -		hwif->OUTB(command, IDE_COMMAND_REG);
--
++	rc = do_rw_taskfile(drive, &task);
+ 
 -		return pre_task_out_intr(drive, rq);
 +	if (rc == ide_stopped && dma) {
 +		/* fallback to PIO */
@@ -53950,7 +57094,7 @@
  }
  
  /*
-@@ -307,57 +306,29 @@ static ide_startstop_t ide_do_rw_disk (ide_drive_t *drive, struct request *rq, s
+@@ -307,57 +301,29 @@ static ide_startstop_t ide_do_rw_disk (ide_drive_t *drive, struct request *rq, s
   * Queries for true maximum capacity of the drive.
   * Returns maximum LBA address (> 0) of the drive, 0 if failed.
   */
@@ -53973,9 +57117,9 @@
 +	else
 +		tf->command = WIN_READ_NATIVE_MAX;
 +	tf->device  = ATA_LBA;
-+	args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE;
++	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
 +	if (lba48)
-+		args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_OUT_HOB);
++		args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB);
  	/* submit command request */
 -	ide_raw_taskfile(drive, &args, NULL);
 +	ide_no_data_taskfile(drive, &args);
@@ -53990,7 +57134,9 @@
 -	}
 -	return addr;
 -}
--
++	if ((tf->status & 0x01) == 0)
++		addr = ide_get_lba_addr(tf, lba48) + 1;
+ 
 -static unsigned long long idedisk_read_native_max_address_ext(ide_drive_t *drive)
 -{
 -	ide_task_t args;
@@ -53998,9 +57144,7 @@
 -
 -	/* Create IDE/ATA command request structure */
 -	memset(&args, 0, sizeof(ide_task_t));
-+	if ((tf->status & 0x01) == 0)
-+		addr = ide_get_lba_addr(tf, lba48) + 1;
- 
+-
 -	args.tfRegister[IDE_SELECT_OFFSET]	= 0x40;
 -	args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_READ_NATIVE_MAX_EXT;
 -	args.command_type			= IDE_DRIVE_TASK_NO_DATA;
@@ -54022,14 +57166,13 @@
  	return addr;
  }
  
-@@ -365,67 +336,37 @@ static unsigned long long idedisk_read_native_max_address_ext(ide_drive_t *drive
+@@ -365,67 +331,37 @@ static unsigned long long idedisk_read_native_max_address_ext(ide_drive_t *drive
   * Sets maximum virtual LBA address of the drive.
   * Returns new maximum virtual LBA address (> 0) or 0 on failure.
   */
 -static unsigned long idedisk_set_max_address(ide_drive_t *drive, unsigned long addr_req)
-+static u64 idedisk_set_max_address(ide_drive_t *drive, u64 addr_req, int lba48)
- {
- 	ide_task_t args;
+-{
+-	ide_task_t args;
 -	unsigned long addr_set = 0;
 -	
 -	addr_req--;
@@ -54056,8 +57199,9 @@
 -}
 -
 -static unsigned long long idedisk_set_max_address_ext(ide_drive_t *drive, unsigned long long addr_req)
--{
--	ide_task_t args;
++static u64 idedisk_set_max_address(ide_drive_t *drive, u64 addr_req, int lba48)
+ {
+ 	ide_task_t args;
 -	unsigned long long addr_set = 0;
 +	struct ide_taskfile *tf = &args.tf;
 +	u64 addr_set = 0;
@@ -54090,9 +57234,9 @@
 +		tf->command  = WIN_SET_MAX;
 +	}
 +	tf->device |= ATA_LBA;
-+	args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE;
++	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
 +	if (lba48)
-+		args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_OUT_HOB);
++		args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB);
  	/* submit command request */
 -	ide_raw_taskfile(drive, &args, NULL);
 +	ide_no_data_taskfile(drive, &args);
@@ -54113,7 +57257,7 @@
  	return addr_set;
  }
  
-@@ -471,10 +412,8 @@ static void idedisk_check_hpa(ide_drive_t *drive)
+@@ -471,10 +407,8 @@ static void idedisk_check_hpa(ide_drive_t *drive)
  	int lba48 = idedisk_supports_lba48(drive->id);
  
  	capacity = drive->capacity64;
@@ -54126,7 +57270,7 @@
  
  	if (ide_in_drive_list(drive->id, hpa_list)) {
  		/*
-@@ -495,10 +434,8 @@ static void idedisk_check_hpa(ide_drive_t *drive)
+@@ -495,10 +429,8 @@ static void idedisk_check_hpa(ide_drive_t *drive)
  			 capacity, sectors_to_MB(capacity),
  			 set_max, sectors_to_MB(set_max));
  
@@ -54139,7 +57283,7 @@
  	if (set_max) {
  		drive->capacity64 = set_max;
  		printk(KERN_INFO "%s: Host Protected Area disabled.\n",
-@@ -556,32 +493,32 @@ static sector_t idedisk_capacity (ide_drive_t *drive)
+@@ -556,32 +488,32 @@ static sector_t idedisk_capacity (ide_drive_t *drive)
  static int smart_enable(ide_drive_t *drive)
  {
  	ide_task_t args;
@@ -54157,7 +57301,7 @@
 +	tf->lbam    = SMART_LCYL_PASS;
 +	tf->lbah    = SMART_HCYL_PASS;
 +	tf->command = WIN_SMART;
-+	args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE;
++	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
 +	return ide_no_data_taskfile(drive, &args);
  }
  
@@ -54180,7 +57324,7 @@
 +	tf->lbam    = SMART_LCYL_PASS;
 +	tf->lbah    = SMART_HCYL_PASS;
 +	tf->command = WIN_SMART;
-+	args.tf_flags	= IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE;
++	args.tf_flags	= IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
 +	args.data_phase	= TASKFILE_IN;
  	(void) smart_enable(drive);
 -	return ide_raw_taskfile(drive, &args, buf);
@@ -54188,7 +57332,7 @@
  }
  
  static int proc_idedisk_read_cache
-@@ -659,19 +596,20 @@ static ide_proc_entry_t idedisk_proc[] = {
+@@ -659,19 +591,20 @@ static ide_proc_entry_t idedisk_proc[] = {
  static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
  {
  	ide_drive_t *drive = q->queuedata;
@@ -54203,11 +57347,11 @@
 +		task.tf.command = WIN_FLUSH_CACHE_EXT;
  	else
 -		rq->cmd[0] = WIN_FLUSH_CACHE;
--
 +		task.tf.command = WIN_FLUSH_CACHE;
 +	task.tf_flags	= IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE;
 +	task.data_phase	= TASKFILE_NO_DATA;
  
+-
 -	rq->cmd_type = REQ_TYPE_ATA_TASK;
 +	rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
  	rq->cmd_flags |= REQ_SOFTBARRIER;
@@ -54216,7 +57360,19 @@
  }
  
  /*
-@@ -753,12 +691,11 @@ static int write_cache(ide_drive_t *drive, int arg)
+@@ -687,8 +620,10 @@ static int set_multcount(ide_drive_t *drive, int arg)
+ 
+ 	if (drive->special.b.set_multmode)
+ 		return -EBUSY;
++
+ 	ide_init_drive_cmd (&rq);
+-	rq.cmd_type = REQ_TYPE_ATA_CMD;
++	rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
++
+ 	drive->mult_req = arg;
+ 	drive->special.b.set_multmode = 1;
+ 	(void) ide_do_drive_cmd (drive, &rq, ide_wait);
+@@ -753,12 +688,11 @@ static int write_cache(ide_drive_t *drive, int arg)
  
  	if (ide_id_has_flush_cache(drive->id)) {
  		memset(&args, 0, sizeof(ide_task_t));
@@ -54228,12 +57384,12 @@
 -		args.handler			= &task_no_data_intr;
 -		err = ide_raw_taskfile(drive, &args, NULL);
 +		args.tf.command = WIN_SETFEATURES;
-+		args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE;
++		args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
 +		err = ide_no_data_taskfile(drive, &args);
  		if (err == 0)
  			drive->wcache = arg;
  	}
-@@ -774,12 +711,11 @@ static int do_idedisk_flushcache (ide_drive_t *drive)
+@@ -774,12 +708,11 @@ static int do_idedisk_flushcache (ide_drive_t *drive)
  
  	memset(&args, 0, sizeof(ide_task_t));
  	if (ide_id_has_flush_cache_ext(drive->id))
@@ -54245,12 +57401,12 @@
 -	args.handler				= &task_no_data_intr;
 -	return ide_raw_taskfile(drive, &args, NULL);
 +		args.tf.command = WIN_FLUSH_CACHE;
-+	args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE;
++	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
 +	return ide_no_data_taskfile(drive, &args);
  }
  
  static int set_acoustic (ide_drive_t *drive, int arg)
-@@ -790,13 +726,11 @@ static int set_acoustic (ide_drive_t *drive, int arg)
+@@ -790,13 +723,11 @@ static int set_acoustic (ide_drive_t *drive, int arg)
  		return -EINVAL;
  
  	memset(&args, 0, sizeof(ide_task_t));
@@ -54264,20 +57420,46 @@
 +	args.tf.feature = arg ? SETFEATURES_EN_AAM : SETFEATURES_DIS_AAM;
 +	args.tf.nsect   = arg;
 +	args.tf.command = WIN_SETFEATURES;
-+	args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE;
++	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
 +	ide_no_data_taskfile(drive, &args);
  	drive->acoustic = arg;
  	return 0;
  }
-@@ -1057,16 +991,15 @@ static int idedisk_open(struct inode *inode, struct file *filp)
+@@ -832,7 +763,6 @@ static void idedisk_add_settings(ide_drive_t *drive)
+ 	ide_add_setting(drive,	"bios_head",	SETTING_RW,	TYPE_BYTE,	0,	255,			1,	1,	&drive->bios_head,	NULL);
+ 	ide_add_setting(drive,	"bios_sect",	SETTING_RW,	TYPE_BYTE,	0,	63,			1,	1,	&drive->bios_sect,	NULL);
+ 	ide_add_setting(drive,	"address",	SETTING_RW,	TYPE_BYTE,	0,	2,			1,	1,	&drive->addressing,	set_lba_addressing);
+-	ide_add_setting(drive,	"bswap",	SETTING_READ,	TYPE_BYTE,	0,	1,			1,	1,	&drive->bswap,		NULL);
+ 	ide_add_setting(drive,	"multcount",	SETTING_RW,	TYPE_BYTE,	0,	id->max_multsect,	1,	1,	&drive->mult_count,	set_multcount);
+ 	ide_add_setting(drive,	"nowerr",	SETTING_RW,	TYPE_BYTE,	0,	1,			1,	1,	&drive->nowerr,		set_nowerr);
+ 	ide_add_setting(drive,	"lun",		SETTING_RW,	TYPE_INT,	0,	7,			1,	1,	&drive->lun,		NULL);
+@@ -1041,6 +971,17 @@ static ide_driver_t idedisk_driver = {
+ #endif
+ };
+ 
++static int idedisk_set_doorlock(ide_drive_t *drive, int on)
++{
++	ide_task_t task;
++
++	memset(&task, 0, sizeof(task));
++	task.tf.command = on ? WIN_DOORLOCK : WIN_DOORUNLOCK;
++	task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
++
++	return ide_no_data_taskfile(drive, &task);
++}
++
+ static int idedisk_open(struct inode *inode, struct file *filp)
+ {
+ 	struct gendisk *disk = inode->i_bdev->bd_disk;
+@@ -1055,18 +996,13 @@ static int idedisk_open(struct inode *inode, struct file *filp)
+ 	idkp->openers++;
+ 
  	if (drive->removable && idkp->openers == 1) {
- 		ide_task_t args;
- 		memset(&args, 0, sizeof(ide_task_t));
+-		ide_task_t args;
+-		memset(&args, 0, sizeof(ide_task_t));
 -		args.tfRegister[IDE_COMMAND_OFFSET] = WIN_DOORLOCK;
 -		args.command_type = IDE_DRIVE_TASK_NO_DATA;
 -		args.handler	  = &task_no_data_intr;
-+		args.tf.command = WIN_DOORLOCK;
-+		args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE;
  		check_disk_change(inode->i_bdev);
  		/*
  		 * Ignore the return code from door_lock,
@@ -54285,51 +57467,208 @@
  		 * and the door_lock is irrelevant at this point.
  		 */
 -		if (drive->doorlocking && ide_raw_taskfile(drive, &args, NULL))
-+		if (drive->doorlocking && ide_no_data_taskfile(drive, &args))
++		if (drive->doorlocking && idedisk_set_doorlock(drive, 1))
  			drive->doorlocking = 0;
  	}
  	return 0;
-@@ -1084,10 +1017,9 @@ static int idedisk_release(struct inode *inode, struct file *filp)
+@@ -1082,12 +1018,7 @@ static int idedisk_release(struct inode *inode, struct file *filp)
+ 		ide_cacheflush_p(drive);
+ 
  	if (drive->removable && idkp->openers == 1) {
- 		ide_task_t args;
- 		memset(&args, 0, sizeof(ide_task_t));
+-		ide_task_t args;
+-		memset(&args, 0, sizeof(ide_task_t));
 -		args.tfRegister[IDE_COMMAND_OFFSET] = WIN_DOORUNLOCK;
 -		args.command_type = IDE_DRIVE_TASK_NO_DATA;
 -		args.handler	  = &task_no_data_intr;
 -		if (drive->doorlocking && ide_raw_taskfile(drive, &args, NULL))
-+		args.tf.command = WIN_DOORUNLOCK;
-+		args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE;
-+		if (drive->doorlocking && ide_no_data_taskfile(drive, &args))
++		if (drive->doorlocking && idedisk_set_doorlock(drive, 0))
  			drive->doorlocking = 0;
  	}
  
 diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
-index 4703837..18c78ad 100644
+index 4703837..5bf3203 100644
 --- a/drivers/ide/ide-dma.c
 +++ b/drivers/ide/ide-dma.c
-@@ -491,10 +491,6 @@ EXPORT_SYMBOL(ide_dma_host_on);
-  
- int __ide_dma_on (ide_drive_t *drive)
+@@ -153,13 +153,7 @@ ide_startstop_t ide_dma_intr (ide_drive_t *drive)
+ 		if (!dma_stat) {
+ 			struct request *rq = HWGROUP(drive)->rq;
+ 
+-			if (rq->rq_disk) {
+-				ide_driver_t *drv;
+-
+-				drv = *(ide_driver_t **)rq->rq_disk->private_data;
+-				drv->end_request(drive, 1, rq->nr_sectors);
+-			} else
+-				ide_end_request(drive, 1, rq->nr_sectors);
++			task_end_request(drive, rq, stat);
+ 			return ide_stopped;
+ 		}
+ 		printk(KERN_ERR "%s: dma_intr: bad DMA status (dma_stat=%x)\n", 
+@@ -408,23 +402,29 @@ static int dma_timer_expiry (ide_drive_t *drive)
+ }
+ 
+ /**
+- *	ide_dma_host_off	-	Generic DMA kill
++ *	ide_dma_host_set	-	Enable/disable DMA on a host
+  *	@drive: drive to control
+  *
+- *	Perform the generic IDE controller DMA off operation. This
+- *	works for most IDE bus mastering controllers
++ *	Enable/disable DMA on an IDE controller following generic
++ *	bus-mastering IDE controller behaviour.
+  */
+ 
+-void ide_dma_host_off(ide_drive_t *drive)
++void ide_dma_host_set(ide_drive_t *drive, int on)
+ {
+ 	ide_hwif_t *hwif	= HWIF(drive);
+ 	u8 unit			= (drive->select.b.unit & 0x01);
+ 	u8 dma_stat		= hwif->INB(hwif->dma_status);
+ 
+-	hwif->OUTB((dma_stat & ~(1<<(5+unit))), hwif->dma_status);
++	if (on)
++		dma_stat |= (1 << (5 + unit));
++	else
++		dma_stat &= ~(1 << (5 + unit));
++
++	hwif->OUTB(dma_stat, hwif->dma_status);
+ }
+ 
+-EXPORT_SYMBOL(ide_dma_host_off);
++EXPORT_SYMBOL_GPL(ide_dma_host_set);
++#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
+ 
+ /**
+  *	ide_dma_off_quietly	-	Generic DMA kill
+@@ -438,11 +438,10 @@ void ide_dma_off_quietly(ide_drive_t *drive)
+ 	drive->using_dma = 0;
+ 	ide_toggle_bounce(drive, 0);
+ 
+-	drive->hwif->dma_host_off(drive);
++	drive->hwif->dma_host_set(drive, 0);
+ }
+ 
+ EXPORT_SYMBOL(ide_dma_off_quietly);
+-#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
+ 
+ /**
+  *	ide_dma_off	-	disable DMA on a device
+@@ -455,56 +454,29 @@ EXPORT_SYMBOL(ide_dma_off_quietly);
+ void ide_dma_off(ide_drive_t *drive)
  {
+ 	printk(KERN_INFO "%s: DMA disabled\n", drive->name);
+-	drive->hwif->dma_off_quietly(drive);
++	ide_dma_off_quietly(drive);
+ }
+ 
+ EXPORT_SYMBOL(ide_dma_off);
+ 
+-#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
+ /**
+- *	ide_dma_host_on	-	Enable DMA on a host
+- *	@drive: drive to enable for DMA
+- *
+- *	Enable DMA on an IDE controller following generic bus mastering
+- *	IDE controller behaviour
+- */
+-
+-void ide_dma_host_on(ide_drive_t *drive)
+-{
+-	if (drive->using_dma) {
+-		ide_hwif_t *hwif	= HWIF(drive);
+-		u8 unit			= (drive->select.b.unit & 0x01);
+-		u8 dma_stat		= hwif->INB(hwif->dma_status);
+-
+-		hwif->OUTB((dma_stat|(1<<(5+unit))), hwif->dma_status);
+-	}
+-}
+-
+-EXPORT_SYMBOL(ide_dma_host_on);
+-
+-/**
+- *	__ide_dma_on		-	Enable DMA on a device
++ *	ide_dma_on		-	Enable DMA on a device
+  *	@drive: drive to enable DMA on
+  *
+  *	Enable IDE DMA for a device on this IDE controller.
+  */
+- 
+-int __ide_dma_on (ide_drive_t *drive)
+-{
 -	/* consult the list of known "bad" drives */
 -	if (__ide_dma_bad_drive(drive))
 -		return 1;
--
+ 
++void ide_dma_on(ide_drive_t *drive)
++{
  	drive->using_dma = 1;
  	ide_toggle_bounce(drive, 1);
  
-@@ -827,22 +823,19 @@ int ide_set_dma(ide_drive_t *drive)
- 	ide_hwif_t *hwif = drive->hwif;
+-	drive->hwif->dma_host_on(drive);
+-
+-	return 0;
++	drive->hwif->dma_host_set(drive, 1);
+ }
+ 
+-EXPORT_SYMBOL(__ide_dma_on);
++EXPORT_SYMBOL(ide_dma_on);
+ 
++#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
+ /**
+  *	ide_dma_setup	-	begin a DMA phase
+  *	@drive: target device
+@@ -759,6 +731,7 @@ EXPORT_SYMBOL_GPL(ide_find_dma_mode);
+ 
+ static int ide_tune_dma(ide_drive_t *drive)
+ {
++	ide_hwif_t *hwif = drive->hwif;
+ 	u8 speed;
+ 
+ 	if (noautodma || drive->nodma || (drive->id->capability & 1) == 0)
+@@ -771,15 +744,21 @@ static int ide_tune_dma(ide_drive_t *drive)
+ 	if (ide_id_dma_bug(drive))
+ 		return 0;
+ 
+-	if (drive->hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
++	if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
+ 		return config_drive_for_dma(drive);
+ 
+ 	speed = ide_max_dma_mode(drive);
+ 
+-	if (!speed)
+-		return 0;
++	if (!speed) {
++		 /* is this really correct/needed? */
++		if ((hwif->host_flags & IDE_HFLAG_CY82C693) &&
++		    ide_dma_good_drive(drive))
++			return 1;
++		else
++			return 0;
++	}
+ 
+-	if (drive->hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
++	if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
+ 		return 0;
+ 
+ 	if (ide_set_dma_mode(drive, speed))
+@@ -824,25 +803,23 @@ err_out:
+ 
+ int ide_set_dma(ide_drive_t *drive)
+ {
+-	ide_hwif_t *hwif = drive->hwif;
  	int rc;
  
--	rc = ide_dma_check(drive);
 +	/*
 +	 * Force DMAing for the beginning of the check.
 +	 * Some chipsets appear to do interesting
 +	 * things, if not checked and cleared.
 +	 *   PARANOIA!!!
 +	 */
-+	hwif->dma_off_quietly(drive);
++	ide_dma_off_quietly(drive);
++
+ 	rc = ide_dma_check(drive);
++	if (rc)
++		return rc;
  
 -	switch(rc) {
 -	case -1: /* DMA needs to be disabled */
@@ -54343,16 +57682,14 @@
 -		BUG();
 -		break;
 -	}
-+	rc = ide_dma_check(drive);
-+	if (rc)
-+		return rc;
++	ide_dma_on(drive);
  
 -	return rc;
-+	return hwif->ide_dma_on(drive);
++	return 0;
  }
  
  #ifdef CONFIG_BLK_DEV_IDEDMA_PCI
-@@ -968,11 +961,6 @@ void ide_setup_dma(ide_hwif_t *hwif, unsigned long base, unsigned num_ports)
+@@ -968,11 +945,6 @@ void ide_setup_dma(ide_hwif_t *hwif, unsigned long base, unsigned num_ports)
  
  	hwif->dma_base = base;
  
@@ -54364,7 +57701,24 @@
  	if (!(hwif->dma_command))
  		hwif->dma_command	= hwif->dma_base;
  	if (!(hwif->dma_vendor1))
-@@ -1014,8 +1002,6 @@ void ide_setup_dma(ide_hwif_t *hwif, unsigned long base, unsigned num_ports)
+@@ -984,14 +956,8 @@ void ide_setup_dma(ide_hwif_t *hwif, unsigned long base, unsigned num_ports)
+ 	if (!(hwif->dma_prdtable))
+ 		hwif->dma_prdtable	= (hwif->dma_base + 4);
+ 
+-	if (!hwif->dma_off_quietly)
+-		hwif->dma_off_quietly = &ide_dma_off_quietly;
+-	if (!hwif->dma_host_off)
+-		hwif->dma_host_off = &ide_dma_host_off;
+-	if (!hwif->ide_dma_on)
+-		hwif->ide_dma_on = &__ide_dma_on;
+-	if (!hwif->dma_host_on)
+-		hwif->dma_host_on = &ide_dma_host_on;
++	if (!hwif->dma_host_set)
++		hwif->dma_host_set = &ide_dma_host_set;
+ 	if (!hwif->dma_setup)
+ 		hwif->dma_setup = &ide_dma_setup;
+ 	if (!hwif->dma_exec_cmd)
+@@ -1014,8 +980,6 @@ void ide_setup_dma(ide_hwif_t *hwif, unsigned long base, unsigned num_ports)
  		       hwif->drives[1].name, (dma_stat & 0x40) ? "DMA" : "pio");
  	}
  	printk("\n");
@@ -54638,10 +57992,41 @@
  	}
  	if (put_user(progress_indication, arg))
  		return (-EFAULT);
+diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c
+index 0f72b98..bb30c29 100644
+--- a/drivers/ide/ide-generic.c
++++ b/drivers/ide/ide-generic.c
+@@ -14,10 +14,16 @@
+ 
+ static int __init ide_generic_init(void)
+ {
++	u8 idx[MAX_HWIFS];
++	int i;
++
+ 	if (ide_hwifs[0].io_ports[IDE_DATA_OFFSET])
+ 		ide_get_lock(NULL, NULL); /* for atari only */
+ 
+-	(void)ideprobe_init();
++	for (i = 0; i < MAX_HWIFS; i++)
++		idx[i] = ide_hwifs[i].present ? 0xff : i;
++
++	ide_device_add_all(idx);
+ 
+ 	if (ide_hwifs[0].io_ports[IDE_DATA_OFFSET])
+ 		ide_release_lock();	/* for atari only */
 diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
-index bef781f..2711b5a 100644
+index bef781f..6f8f544 100644
 --- a/drivers/ide/ide-io.c
 +++ b/drivers/ide/ide-io.c
+@@ -75,7 +75,7 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
+ 	 */
+ 	if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
+ 		drive->state = 0;
+-		HWGROUP(drive)->hwif->ide_dma_on(drive);
++		ide_dma_on(drive);
+ 	}
+ 
+ 	if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
 @@ -189,18 +189,14 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
  			return ide_stopped;
  		}
@@ -54679,9 +58064,12 @@
  
  	case ide_pm_restore_dma:	/* Resume step 3 (restore DMA) */
  		/*
-@@ -227,7 +221,6 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
+@@ -225,9 +219,8 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
+ 		 * we could be smarter and check for current xfer_speed
+ 		 * in struct drive etc...
  		 */
- 		if (drive->hwif->ide_dma_on == NULL)
+-		if (drive->hwif->ide_dma_on == NULL)
++		if (drive->hwif->dma_host_set == NULL)
  			break;
 -		drive->hwif->dma_off_quietly(drive);
  		/*
@@ -54693,7 +58081,7 @@
  	return ide_stopped;
 +
 +out_do_tf:
-+	args->tf_flags	 = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE;
++	args->tf_flags	 = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
 +	args->data_phase = TASKFILE_NO_DATA;
 +	return do_rw_taskfile(drive, args);
  }
@@ -54748,10 +58136,28 @@
  /**
   *	ide_end_drive_cmd	-	end an explicit drive command
   *	@drive: command 
-@@ -332,51 +372,22 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
- 			args[1] = err;
- 			args[2] = hwif->INB(IDE_NSECTOR_REG);
- 		}
+@@ -314,7 +354,6 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
+  
+ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
+ {
+-	ide_hwif_t *hwif = HWIF(drive);
+ 	unsigned long flags;
+ 	struct request *rq;
+ 
+@@ -322,61 +361,18 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
+ 	rq = HWGROUP(drive)->rq;
+ 	spin_unlock_irqrestore(&ide_lock, flags);
+ 
+-	if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
+-		u8 *args = (u8 *) rq->buffer;
+-		if (rq->errors == 0)
+-			rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
+-
+-		if (args) {
+-			args[0] = stat;
+-			args[1] = err;
+-			args[2] = hwif->INB(IDE_NSECTOR_REG);
+-		}
 -	} else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
 -		u8 *args = (u8 *) rq->buffer;
 -		if (rq->errors == 0)
@@ -54768,7 +58174,8 @@
 -			args[5] = hwif->INB(IDE_HCYL_REG);
 -			args[6] = hwif->INB(IDE_SELECT_REG);
 -		}
- 	} else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
+-	} else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
++	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
  		ide_task_t *args = (ide_task_t *) rq->special;
  		if (rq->errors == 0)
  			rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
@@ -54802,18 +58209,15 @@
 +			tf->error = err;
 +			tf->status = stat;
 +
-+			args->tf_flags |= (IDE_TFLAG_IN_TF|IDE_TFLAG_IN_DEVICE);
-+			if (args->tf_flags & IDE_TFLAG_LBA48)
-+				args->tf_flags |= IDE_TFLAG_IN_HOB;
-+
 +			ide_tf_read(drive, args);
  		}
  	} else if (blk_pm_request(rq)) {
  		struct request_pm_state *pm = rq->data;
-@@ -616,28 +627,6 @@ ide_startstop_t ide_abort(ide_drive_t *drive, const char *msg)
+@@ -615,90 +611,26 @@ ide_startstop_t ide_abort(ide_drive_t *drive, const char *msg)
+ 		return __ide_abort(drive, rq);
  }
  
- /**
+-/**
 - *	ide_cmd		-	issue a simple drive command
 - *	@drive: drive the command is for
 - *	@cmd: command byte
@@ -54826,25 +58230,63 @@
 -
 -static void ide_cmd (ide_drive_t *drive, u8 cmd, u8 nsect,
 -		ide_handler_t *handler)
--{
++static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
+ {
 -	ide_hwif_t *hwif = HWIF(drive);
 -	if (IDE_CONTROL_REG)
 -		hwif->OUTB(drive->ctl,IDE_CONTROL_REG);	/* clear nIEN */
 -	SELECT_MASK(drive,0);
 -	hwif->OUTB(nsect,IDE_NSECTOR_REG);
 -	ide_execute_command(drive, cmd, handler, WAIT_CMD, NULL);
--}
--
++	tf->nsect   = drive->sect;
++	tf->lbal    = drive->sect;
++	tf->lbam    = drive->cyl;
++	tf->lbah    = drive->cyl >> 8;
++	tf->device  = ((drive->head - 1) | drive->select.all) & ~ATA_LBA;
++	tf->command = WIN_SPECIFY;
+ }
+ 
 -/**
-  *	drive_cmd_intr		- 	drive command completion interrupt
-  *	@drive: drive the completion interrupt occurred on
-  *
-@@ -673,32 +662,26 @@ static ide_startstop_t drive_cmd_intr (ide_drive_t *drive)
- 	return ide_stopped;
+- *	drive_cmd_intr		- 	drive command completion interrupt
+- *	@drive: drive the completion interrupt occurred on
+- *
+- *	drive_cmd_intr() is invoked on completion of a special DRIVE_CMD.
+- *	We do any necessary data reading and then wait for the drive to
+- *	go non busy. At that point we may read the error data and complete
+- *	the request
+- */
+- 
+-static ide_startstop_t drive_cmd_intr (ide_drive_t *drive)
++static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
+ {
+-	struct request *rq = HWGROUP(drive)->rq;
+-	ide_hwif_t *hwif = HWIF(drive);
+-	u8 *args = (u8 *) rq->buffer;
+-	u8 stat = hwif->INB(IDE_STATUS_REG);
+-	int retries = 10;
+-
+-	local_irq_enable_in_hardirq();
+-	if (rq->cmd_type == REQ_TYPE_ATA_CMD &&
+-	    (stat & DRQ_STAT) && args && args[3]) {
+-		u8 io_32bit = drive->io_32bit;
+-		drive->io_32bit = 0;
+-		hwif->ata_input_data(drive, &args[4], args[3] * SECTOR_WORDS);
+-		drive->io_32bit = io_32bit;
+-		while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
+-			udelay(100);
+-	}
+-
+-	if (!OK_STAT(stat, READY_STAT, BAD_STAT))
+-		return ide_error(drive, "drive_cmd", stat);
+-		/* calls ide_end_drive_cmd */
+-	ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG));
+-	return ide_stopped;
++	tf->nsect   = drive->sect;
++	tf->command = WIN_RESTORE;
  }
  
 -static void ide_init_specify_cmd(ide_drive_t *drive, ide_task_t *task)
-+static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
++static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
  {
 -	task->tfRegister[IDE_NSECTOR_OFFSET] = drive->sect;
 -	task->tfRegister[IDE_SECTOR_OFFSET]  = drive->sect;
@@ -54854,28 +58296,18 @@
 -	task->tfRegister[IDE_COMMAND_OFFSET] = WIN_SPECIFY;
 -
 -	task->handler = &set_geometry_intr;
-+	tf->nsect   = drive->sect;
-+	tf->lbal    = drive->sect;
-+	tf->lbam    = drive->cyl;
-+	tf->lbah    = drive->cyl >> 8;
-+	tf->device  = ((drive->head - 1) | drive->select.all) & ~ATA_LBA;
-+	tf->command = WIN_SPECIFY;
- }
- 
+-}
+-
 -static void ide_init_restore_cmd(ide_drive_t *drive, ide_task_t *task)
-+static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
- {
+-{
 -	task->tfRegister[IDE_NSECTOR_OFFSET] = drive->sect;
 -	task->tfRegister[IDE_COMMAND_OFFSET] = WIN_RESTORE;
 -
 -	task->handler = &recal_intr;
-+	tf->nsect   = drive->sect;
-+	tf->command = WIN_RESTORE;
- }
- 
+-}
+-
 -static void ide_init_setmult_cmd(ide_drive_t *drive, ide_task_t *task)
-+static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
- {
+-{
 -	task->tfRegister[IDE_NSECTOR_OFFSET] = drive->mult_req;
 -	task->tfRegister[IDE_COMMAND_OFFSET] = WIN_SETMULT;
 -
@@ -54885,7 +58317,7 @@
  }
  
  static ide_startstop_t ide_disk_special(ide_drive_t *drive)
-@@ -707,19 +690,19 @@ static ide_startstop_t ide_disk_special(ide_drive_t *drive)
+@@ -707,19 +639,19 @@ static ide_startstop_t ide_disk_special(ide_drive_t *drive)
  	ide_task_t args;
  
  	memset(&args, 0, sizeof(ide_task_t));
@@ -54909,38 +58341,43 @@
  	} else if (s->all) {
  		int special = s->all;
  		s->all = 0;
-@@ -727,6 +710,9 @@ static ide_startstop_t ide_disk_special(ide_drive_t *drive)
+@@ -727,6 +659,9 @@ static ide_startstop_t ide_disk_special(ide_drive_t *drive)
  		return ide_stopped;
  	}
  
-+	args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE |
++	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE |
 +			IDE_TFLAG_CUSTOM_HANDLER;
 +
  	do_rw_taskfile(drive, &args);
  
  	return ide_started;
-@@ -861,13 +847,17 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
+@@ -801,7 +736,7 @@ static ide_startstop_t do_special (ide_drive_t *drive)
+ 
+ 			if (hwif->host_flags & IDE_HFLAG_SET_PIO_MODE_KEEP_DMA) {
+ 				if (keep_dma)
+-					hwif->ide_dma_on(drive);
++					ide_dma_on(drive);
+ 			}
+ 		}
+ 
+@@ -861,13 +796,10 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
  		struct request *rq)
  {
  	ide_hwif_t *hwif = HWIF(drive);
-+	u8 *args = rq->buffer;
-+	ide_task_t ltask;
-+	struct ide_taskfile *tf = &ltask.tf;
-+
- 	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
+-	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
 - 		ide_task_t *args = rq->special;
-+		ide_task_t *task = rq->special;
-  
+- 
 -		if (!args)
-+		if (task == NULL)
- 			goto done;
+-			goto done;
++	ide_task_t *task = rq->special;
  
 -		hwif->data_phase = args->data_phase;
++	if (task) {
 +		hwif->data_phase = task->data_phase;
  
  		switch (hwif->data_phase) {
  		case TASKFILE_MULTI_OUT:
-@@ -880,55 +870,34 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
+@@ -880,57 +812,9 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
  			break;
  		}
  
@@ -54974,22 +58411,13 @@
 -
 -		if (!args)
 -			goto done;
-+		return do_rw_taskfile(drive, task);
-+	}
-+
-+	if (args == NULL)
-+		goto done;
-+
-+	memset(&ltask, 0, sizeof(ltask));
-+	if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
- #ifdef DEBUG
+-#ifdef DEBUG
 - 		printk("%s: DRIVE_CMD ", drive->name);
 - 		printk("cmd=0x%02x ", args[0]);
 - 		printk("sc=0x%02x ", args[1]);
 - 		printk("fr=0x%02x ", args[2]);
 - 		printk("xx=0x%02x\n", args[3]);
-+		printk("%s: DRIVE_CMD\n", drive->name);
- #endif
+-#endif
 - 		if (args[0] == WIN_SMART) {
 - 			hwif->OUTB(0x4f, IDE_LCYL_REG);
 - 			hwif->OUTB(0xc2, IDE_HCYL_REG);
@@ -55001,27 +58429,26 @@
 - 		hwif->OUTB(args[2],IDE_FEATURE_REG);
 - 		ide_cmd(drive, args[0], args[1], &drive_cmd_intr);
 - 		return ide_started;
-+		tf->feature = args[2];
-+		if (args[0] == WIN_SMART) {
-+			tf->nsect = args[3];
-+			tf->lbal  = args[1];
-+			tf->lbam  = 0x4f;
-+			tf->lbah  = 0xc2;
-+			ltask.tf_flags = IDE_TFLAG_OUT_TF;
-+		} else {
-+			tf->nsect = args[1];
-+			ltask.tf_flags = IDE_TFLAG_OUT_FEATURE |
-+					 IDE_TFLAG_OUT_NSECT;
-+		}
-  	}
-+	tf->command = args[0];
-+	ide_tf_load(drive, &ltask);
-+	ide_execute_command(drive, args[0], &drive_cmd_intr, WAIT_WORSTCASE, NULL);
-+	return ide_started;
- 
- done:
+- 	}
+-
+-done:
++		return do_rw_taskfile(drive, task);
++	}
++
   	/*
-@@ -1003,6 +972,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
+  	 * NULL is actually a valid way of waiting for
+  	 * all current requests to be flushed from the queue.
+@@ -970,8 +854,7 @@ static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
+ 		if (rc)
+ 			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
+ 		SELECT_DRIVE(drive);
+-		if (IDE_CONTROL_REG)
+-			HWIF(drive)->OUTB(drive->ctl, IDE_CONTROL_REG);
++		ide_set_irq(drive, 1);
+ 		rc = ide_wait_not_busy(HWIF(drive), 100000);
+ 		if (rc)
+ 			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
+@@ -1003,6 +886,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
  
  	/* bail early if we've exceeded max_failures */
  	if (drive->max_failures && (drive->failures > drive->max_failures)) {
@@ -55029,30 +58456,46 @@
  		goto kill_rq;
  	}
  
-@@ -1035,7 +1005,6 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
+@@ -1034,9 +918,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
+ 		if (drive->current_speed == 0xff)
  			ide_config_drive_speed(drive, drive->desired_speed);
  
- 		if (rq->cmd_type == REQ_TYPE_ATA_CMD ||
+-		if (rq->cmd_type == REQ_TYPE_ATA_CMD ||
 -		    rq->cmd_type == REQ_TYPE_ATA_TASK ||
- 		    rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
+-		    rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
++		if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
  			return execute_drive_cmd(drive, rq);
  		else if (blk_pm_request(rq)) {
-@@ -1247,8 +1216,12 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
- 		if (hwgroup->hwif->sharing_irq &&
- 		    hwif != hwgroup->hwif &&
- 		    hwif->io_ports[IDE_CONTROL_OFFSET]) {
+ 			struct request_pm_state *pm = rq->data;
+@@ -1244,11 +1126,13 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
+ 		}
+ 	again:
+ 		hwif = HWIF(drive);
+-		if (hwgroup->hwif->sharing_irq &&
+-		    hwif != hwgroup->hwif &&
+-		    hwif->io_ports[IDE_CONTROL_OFFSET]) {
 -			/* set nIEN for previous hwif */
 -			SELECT_INTERRUPT(drive);
++		if (hwgroup->hwif->sharing_irq && hwif != hwgroup->hwif) {
 +			/*
 +			 * set nIEN for previous hwif, drives in the
 +			 * quirk_list may not like intr setups/cleanups
 +			 */
 +			if (drive->quirk_list != 1)
-+				hwif->OUTB(drive->ctl | 2, IDE_CONTROL_REG);
++				ide_set_irq(drive, 0);
  		}
  		hwgroup->hwif = hwif;
  		hwgroup->drive = drive;
-@@ -1454,12 +1427,8 @@ void ide_timer_expiry (unsigned long data)
+@@ -1361,7 +1245,7 @@ static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
+ 	 */
+ 	drive->retry_pio++;
+ 	drive->state = DMA_PIO_RETRY;
+-	hwif->dma_off_quietly(drive);
++	ide_dma_off_quietly(drive);
+ 
+ 	/*
+ 	 * un-busy drive etc (hwgroup->busy is cleared on return) and
+@@ -1454,12 +1338,8 @@ void ide_timer_expiry (unsigned long data)
  			 */
  			spin_unlock(&ide_lock);
  			hwif  = HWIF(drive);
@@ -55065,7 +58508,15 @@
  			/* local CPU only,
  			 * as if we were handling an interrupt */
  			local_irq_disable();
-@@ -1785,3 +1754,19 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio
+@@ -1710,7 +1590,6 @@ irqreturn_t ide_intr (int irq, void *dev_id)
+ void ide_init_drive_cmd (struct request *rq)
+ {
+ 	memset(rq, 0, sizeof(*rq));
+-	rq->cmd_type = REQ_TYPE_ATA_CMD;
+ 	rq->ref_count = 1;
+ }
+ 
+@@ -1785,3 +1664,19 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio
  }
  
  EXPORT_SYMBOL(ide_do_drive_cmd);
@@ -55086,7 +58537,7 @@
 +
 +EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);
 diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
-index bb9693d..c97c071 100644
+index bb9693d..e2a7e95 100644
 --- a/drivers/ide/ide-iops.c
 +++ b/drivers/ide/ide-iops.c
 @@ -158,14 +158,6 @@ void default_hwif_mmiops (ide_hwif_t *hwif)
@@ -55155,7 +58606,7 @@
 -	    (args->tfRegister[IDE_SECTOR_OFFSET] > XFER_UDMA_2) &&
 -	    (args->tfRegister[IDE_FEATURE_OFFSET] == SETFEATURES_XFER)) {
 +	if (args->tf.command == WIN_SETFEATURES &&
-+	    args->tf.lbal > XFER_UDMA_2 &&
++	    args->tf.nsect > XFER_UDMA_2 &&
 +	    args->tf.feature == SETFEATURES_XFER) {
  		if (eighty_ninty_three(drive) == 0) {
  			printk(KERN_WARNING "%s: UDMA speeds >UDMA33 cannot "
@@ -55168,12 +58619,66 @@
 -	    (args->tfRegister[IDE_SECTOR_OFFSET] >= XFER_SW_DMA_0) &&
 -	    (args->tfRegister[IDE_FEATURE_OFFSET] == SETFEATURES_XFER) &&
 +	if (args->tf.command == WIN_SETFEATURES &&
-+	    args->tf.lbal >= XFER_SW_DMA_0 &&
++	    args->tf.nsect >= XFER_SW_DMA_0 &&
 +	    args->tf.feature == SETFEATURES_XFER &&
  	    (drive->id->dma_ultra ||
  	     drive->id->dma_mword ||
  	     drive->id->dma_1word))
-@@ -902,8 +878,9 @@ EXPORT_SYMBOL(ide_set_handler);
+@@ -712,8 +688,7 @@ int ide_driveid_update(ide_drive_t *drive)
+ 	 */
+ 
+ 	SELECT_MASK(drive, 1);
+-	if (IDE_CONTROL_REG)
+-		hwif->OUTB(drive->ctl,IDE_CONTROL_REG);
++	ide_set_irq(drive, 1);
+ 	msleep(50);
+ 	hwif->OUTB(WIN_IDENTIFY, IDE_COMMAND_REG);
+ 	timeout = jiffies + WAIT_WORSTCASE;
+@@ -766,8 +741,8 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
+ //		msleep(50);
+ 
+ #ifdef CONFIG_BLK_DEV_IDEDMA
+-	if (hwif->ide_dma_on)	/* check if host supports DMA */
+-		hwif->dma_host_off(drive);
++	if (hwif->dma_host_set)	/* check if host supports DMA */
++		hwif->dma_host_set(drive, 0);
+ #endif
+ 
+ 	/* Skip setting PIO flow-control modes on pre-EIDE drives */
+@@ -796,13 +771,12 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
+ 	SELECT_DRIVE(drive);
+ 	SELECT_MASK(drive, 0);
+ 	udelay(1);
+-	if (IDE_CONTROL_REG)
+-		hwif->OUTB(drive->ctl | 2, IDE_CONTROL_REG);
++	ide_set_irq(drive, 0);
+ 	hwif->OUTB(speed, IDE_NSECTOR_REG);
+ 	hwif->OUTB(SETFEATURES_XFER, IDE_FEATURE_REG);
+ 	hwif->OUTBSYNC(drive, WIN_SETFEATURES, IDE_COMMAND_REG);
+-	if ((IDE_CONTROL_REG) && (drive->quirk_list == 2))
+-		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
++	if (drive->quirk_list == 2)
++		ide_set_irq(drive, 1);
+ 
+ 	error = __ide_wait_stat(drive, drive->ready_stat,
+ 				BUSY_STAT|DRQ_STAT|ERR_STAT,
+@@ -823,10 +797,11 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
+ 
+  skip:
+ #ifdef CONFIG_BLK_DEV_IDEDMA
+-	if (speed >= XFER_SW_DMA_0)
+-		hwif->dma_host_on(drive);
+-	else if (hwif->ide_dma_on)	/* check if host supports DMA */
+-		hwif->dma_off_quietly(drive);
++	if ((speed >= XFER_SW_DMA_0 || (hwif->host_flags & IDE_HFLAG_VDMA)) &&
++	    drive->using_dma)
++		hwif->dma_host_set(drive, 1);
++	else if (hwif->dma_host_set)	/* check if host supports DMA */
++		ide_dma_off_quietly(drive);
+ #endif
+ 
+ 	switch(speed) {
+@@ -902,8 +877,9 @@ EXPORT_SYMBOL(ide_set_handler);
   *	handler and IRQ setup do not race. All IDE command kick off
   *	should go via this function or do equivalent locking.
   */
@@ -55185,7 +58690,20 @@
  {
  	unsigned long flags;
  	ide_hwgroup_t *hwgroup = HWGROUP(drive);
-@@ -1051,8 +1028,7 @@ static void ide_disk_pre_reset(ide_drive_t *drive)
+@@ -1035,10 +1011,10 @@ static void check_dma_crc(ide_drive_t *drive)
+ {
+ #ifdef CONFIG_BLK_DEV_IDEDMA
+ 	if (drive->crc_count) {
+-		drive->hwif->dma_off_quietly(drive);
++		ide_dma_off_quietly(drive);
+ 		ide_set_xfer_rate(drive, ide_auto_reduce_xfer(drive));
+ 		if (drive->current_speed >= XFER_SW_DMA_0)
+-			(void) HWIF(drive)->ide_dma_on(drive);
++			ide_dma_on(drive);
+ 	} else
+ 		ide_dma_off(drive);
+ #endif
+@@ -1051,8 +1027,7 @@ static void ide_disk_pre_reset(ide_drive_t *drive)
  	drive->special.all = 0;
  	drive->special.b.set_geometry = legacy;
  	drive->special.b.recalibrate  = legacy;
@@ -55195,7 +58713,7 @@
  	if (!drive->keep_settings && !drive->using_dma)
  		drive->mult_req = 0;
  	if (drive->mult_req != drive->mult_count)
-@@ -1137,7 +1113,6 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
+@@ -1137,7 +1112,6 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
  	for (unit = 0; unit < MAX_DRIVES; ++unit)
  		pre_reset(&hwif->drives[unit]);
  
@@ -55203,7 +58721,7 @@
  	if (!IDE_CONTROL_REG) {
  		spin_unlock_irqrestore(&ide_lock, flags);
  		return ide_stopped;
-@@ -1174,11 +1149,8 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
+@@ -1174,11 +1148,8 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
  	 * state when the disks are reset this way. At least, the Winbond
  	 * 553 documentation says that
  	 */
@@ -55217,7 +58735,7 @@
  	spin_unlock_irqrestore(&ide_lock, flags);
  	return ide_started;
 diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
-index 062d3bc..a3bd8e8 100644
+index 062d3bc..9b44fbd 100644
 --- a/drivers/ide/ide-lib.c
 +++ b/drivers/ide/ide-lib.c
 @@ -441,6 +441,12 @@ int ide_set_xfer_rate(ide_drive_t *drive, u8 rate)
@@ -55233,28 +58751,47 @@
  
  	return ide_set_dma_mode(drive, rate);
  }
-@@ -458,8 +464,7 @@ static void ide_dump_opcode(ide_drive_t *drive)
+@@ -448,8 +454,7 @@ int ide_set_xfer_rate(ide_drive_t *drive, u8 rate)
+ static void ide_dump_opcode(ide_drive_t *drive)
+ {
+ 	struct request *rq;
+-	u8 opcode = 0;
+-	int found = 0;
++	ide_task_t *task = NULL;
+ 
+ 	spin_lock(&ide_lock);
+ 	rq = NULL;
+@@ -458,164 +463,129 @@ static void ide_dump_opcode(ide_drive_t *drive)
  	spin_unlock(&ide_lock);
  	if (!rq)
  		return;
 -	if (rq->cmd_type == REQ_TYPE_ATA_CMD ||
 -	    rq->cmd_type == REQ_TYPE_ATA_TASK) {
-+	if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
- 		char *args = rq->buffer;
- 		if (args) {
- 			opcode = args[0];
-@@ -468,8 +473,7 @@ static void ide_dump_opcode(ide_drive_t *drive)
- 	} else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
- 		ide_task_t *args = rq->special;
- 		if (args) {
+-		char *args = rq->buffer;
+-		if (args) {
+-			opcode = args[0];
+-			found = 1;
+-		}
+-	} else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
+-		ide_task_t *args = rq->special;
+-		if (args) {
 -			task_struct_t *tf = (task_struct_t *) args->tfRegister;
 -			opcode = tf->command;
-+			opcode = args->tf.command;
- 			found = 1;
- 		}
- 	}
-@@ -481,141 +485,118 @@ static void ide_dump_opcode(ide_drive_t *drive)
- 		printk("0x%02x\n", opcode);
+-			found = 1;
+-		}
+-	}
++
++	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
++		task = rq->special;
+ 
+ 	printk("ide: failed opcode was: ");
+-	if (!found)
+-		printk("unknown\n");
++	if (task == NULL)
++		printk(KERN_CONT "unknown\n");
+ 	else
+-		printk("0x%02x\n", opcode);
++		printk(KERN_CONT "0x%02x\n", task->tf.command);
  }
  
 -static u8 ide_dump_ata_status(ide_drive_t *drive, const char *msg, u8 stat)
@@ -55481,8 +59018,62 @@
  }
  
  EXPORT_SYMBOL(ide_dump_status);
+diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c
+index e245521..cbbb0f7 100644
+--- a/drivers/ide/ide-pnp.c
++++ b/drivers/ide/ide-pnp.c
+@@ -31,7 +31,6 @@ static int idepnp_probe(struct pnp_dev * dev, const struct pnp_device_id *dev_id
+ {
+ 	hw_regs_t hw;
+ 	ide_hwif_t *hwif;
+-	int index;
+ 
+ 	if (!(pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && pnp_irq_valid(dev, 0)))
+ 		return -1;
+@@ -41,11 +40,19 @@ static int idepnp_probe(struct pnp_dev * dev, const struct pnp_device_id *dev_id
+ 				pnp_port_start(dev, 1));
+ 	hw.irq = pnp_irq(dev, 0);
+ 
+-	index = ide_register_hw(&hw, NULL, 1, &hwif);
++	hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
++	if (hwif) {
++		u8 index = hwif->index;
++		u8 idx[4] = { index, 0xff, 0xff, 0xff };
++
++		ide_init_port_data(hwif, index);
++		ide_init_port_hw(hwif, &hw);
+ 
+-	if (index != -1) {
+-	    	printk(KERN_INFO "ide%d: generic PnP IDE interface\n", index);
++		printk(KERN_INFO "ide%d: generic PnP IDE interface\n", index);
+ 		pnp_set_drvdata(dev,hwif);
++
++		ide_device_add(idx);
++
+ 		return 0;
+ 	}
+ 
+@@ -68,12 +75,15 @@ static struct pnp_driver idepnp_driver = {
+ 	.remove		= idepnp_remove,
+ };
+ 
+-void __init pnpide_init(void)
++static int __init pnpide_init(void)
+ {
+-	pnp_register_driver(&idepnp_driver);
++	return pnp_register_driver(&idepnp_driver);
+ }
+ 
+-void __exit pnpide_exit(void)
++static void __exit pnpide_exit(void)
+ {
+ 	pnp_unregister_driver(&idepnp_driver);
+ }
++
++module_init(pnpide_init);
++module_exit(pnpide_exit);
 diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
-index 2994523..0379d1f 100644
+index 2994523..edf650b 100644
 --- a/drivers/ide/ide-probe.c
 +++ b/drivers/ide/ide-probe.c
 @@ -95,10 +95,10 @@ static void ide_disk_init_mult_count(ide_drive_t *drive)
@@ -55498,58 +59089,243 @@
  		if (drive->mult_req > id->max_multsect)
  			drive->mult_req = id->max_multsect;
  		if (drive->mult_req || ((id->multsect_valid & 1) && id->multsect))
-@@ -234,7 +234,10 @@ static inline void do_identify (ide_drive_t *drive, u8 cmd)
+@@ -234,7 +234,7 @@ static inline void do_identify (ide_drive_t *drive, u8 cmd)
  
  	drive->media = ide_disk;
  	printk("%s DISK drive\n", (id->config == 0x848a) ? "CFA" : "ATA" );
 -	QUIRK_LIST(drive);
 +
-+	if (hwif->quirkproc)
-+		drive->quirk_list = hwif->quirkproc(drive);
-+
  	return;
  
  err_misc:
-@@ -830,16 +833,8 @@ static void probe_hwif(ide_hwif_t *hwif)
- 
- 			drive->nice1 = 1;
+@@ -350,22 +350,19 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd)
+ 	 * the irq handler isn't expecting.
+ 	 */
+ 	if (IDE_CONTROL_REG) {
+-		u8 ctl = drive->ctl | 2;
+ 		if (!hwif->irq) {
+ 			autoprobe = 1;
+ 			cookie = probe_irq_on();
+-			/* enable device irq */
+-			ctl &= ~2;
+ 		}
+-		hwif->OUTB(ctl, IDE_CONTROL_REG);
++		ide_set_irq(drive, autoprobe);
+ 	}
+ 
+ 	retval = actual_try_to_identify(drive, cmd);
+ 
+ 	if (autoprobe) {
+ 		int irq;
+-		/* mask device irq */
+-		hwif->OUTB(drive->ctl|2, IDE_CONTROL_REG);
++
++		ide_set_irq(drive, 0);
+ 		/* clear drive IRQ */
+ 		(void) hwif->INB(IDE_STATUS_REG);
+ 		udelay(5);
+@@ -385,6 +382,20 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd)
+ 	return retval;
+ }
  
--			if (hwif->ide_dma_on) {
--				/*
--				 * Force DMAing for the beginning of the check.
--				 * Some chipsets appear to do interesting
--				 * things, if not checked and cleared.
--				 *   PARANOIA!!!
--				 */
--				hwif->dma_off_quietly(drive);
-+			if (hwif->ide_dma_on)
- 				ide_set_dma(drive);
--			}
- 		}
- 	}
++static int ide_busy_sleep(ide_hwif_t *hwif)
++{
++	unsigned long timeout = jiffies + WAIT_WORSTCASE;
++	u8 stat;
++
++	do {
++		msleep(50);
++		stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
++		if ((stat & BUSY_STAT) == 0)
++			return 0;
++	} while (time_before(jiffies, timeout));
++
++	return 1;
++}
  
-@@ -968,11 +963,6 @@ static int ide_init_queue(ide_drive_t *drive)
-  * Much of the code is for correctly detecting/handling irq sharing
-  * and irq serialization situations.  This is somewhat complex because
-  * it handles static as well as dynamic (PCMCIA) IDE interfaces.
-- *
-- * The IRQF_DISABLED in sa_flags means ide_intr() is always entered with
-- * interrupts completely disabled.  This can be bad for interrupt latency,
-- * but anything else has led to problems on some machines.  We re-enable
-- * interrupts as much as we can safely do in most places.
-  */
- static int init_irq (ide_hwif_t *hwif)
+ /**
+  *	do_probe		-	probe an IDE device
+@@ -453,7 +464,6 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
+ 		if ((rc == 1 && cmd == WIN_PIDENTIFY) &&
+ 			((drive->autotune == IDE_TUNE_DEFAULT) ||
+ 			(drive->autotune == IDE_TUNE_AUTO))) {
+-			unsigned long timeout;
+ 			printk("%s: no response (status = 0x%02x), "
+ 				"resetting drive\n", drive->name,
+ 				hwif->INB(IDE_STATUS_REG));
+@@ -461,10 +471,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
+ 			hwif->OUTB(drive->select.all, IDE_SELECT_REG);
+ 			msleep(50);
+ 			hwif->OUTB(WIN_SRST, IDE_COMMAND_REG);
+-			timeout = jiffies;
+-			while (((hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) &&
+-			       time_before(jiffies, timeout + WAIT_WORSTCASE))
+-				msleep(50);
++			(void)ide_busy_sleep(hwif);
+ 			rc = try_to_identify(drive, cmd);
+ 		}
+ 		if (rc == 1)
+@@ -492,20 +499,16 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
+ static void enable_nest (ide_drive_t *drive)
  {
-@@ -1055,17 +1045,13 @@ static int init_irq (ide_hwif_t *hwif)
- 	 * Allocate the irq, if not already obtained for another hwif
- 	 */
- 	if (!match || match->irq != hwif->irq) {
--		int sa = IRQF_DISABLED;
-+		int sa = 0;
- #if defined(__mc68000__) || defined(CONFIG_APUS)
- 		sa = IRQF_SHARED;
- #endif /* __mc68000__ || CONFIG_APUS */
- 
+ 	ide_hwif_t *hwif = HWIF(drive);
+-	unsigned long timeout;
+ 
+ 	printk("%s: enabling %s -- ", hwif->name, drive->id->model);
+ 	SELECT_DRIVE(drive);
+ 	msleep(50);
+ 	hwif->OUTB(EXABYTE_ENABLE_NEST, IDE_COMMAND_REG);
+-	timeout = jiffies + WAIT_WORSTCASE;
+-	do {
+-		if (time_after(jiffies, timeout)) {
+-			printk("failed (timeout)\n");
+-			return;
+-		}
+-		msleep(50);
+-	} while ((hwif->INB(IDE_STATUS_REG)) & BUSY_STAT);
++
++	if (ide_busy_sleep(hwif)) {
++		printk(KERN_CONT "failed (timeout)\n");
++		return;
++	}
+ 
+ 	msleep(50);
+ 
+@@ -653,8 +656,7 @@ static int wait_hwif_ready(ide_hwif_t *hwif)
+ 		/* Ignore disks that we will not probe for later. */
+ 		if (!drive->noprobe || drive->present) {
+ 			SELECT_DRIVE(drive);
+-			if (IDE_CONTROL_REG)
+-				hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
++			ide_set_irq(drive, 1);
+ 			mdelay(2);
+ 			rc = ide_wait_not_busy(hwif, 35000);
+ 			if (rc)
+@@ -673,19 +675,18 @@ out:
+ 
+ /**
+  *	ide_undecoded_slave	-	look for bad CF adapters
+- *	@hwif: interface
++ *	@drive1: drive
+  *
+  *	Analyse the drives on the interface and attempt to decide if we
+  *	have the same drive viewed twice. This occurs with crap CF adapters
+  *	and PCMCIA sometimes.
+  */
+ 
+-void ide_undecoded_slave(ide_hwif_t *hwif)
++void ide_undecoded_slave(ide_drive_t *drive1)
+ {
+-	ide_drive_t *drive0 = &hwif->drives[0];
+-	ide_drive_t *drive1 = &hwif->drives[1];
++	ide_drive_t *drive0 = &drive1->hwif->drives[0];
+ 
+-	if (drive0->present == 0 || drive1->present == 0)
++	if ((drive1->dn & 1) == 0 || drive0->present == 0)
+ 		return;
+ 
+ 	/* If the models don't match they are not the same product */
+@@ -788,18 +789,11 @@ static void probe_hwif(ide_hwif_t *hwif)
+ 		}
+ 	}
+ 	if (hwif->io_ports[IDE_CONTROL_OFFSET] && hwif->reset) {
+-		unsigned long timeout = jiffies + WAIT_WORSTCASE;
+-		u8 stat;
+-
+ 		printk(KERN_WARNING "%s: reset\n", hwif->name);
+ 		hwif->OUTB(12, hwif->io_ports[IDE_CONTROL_OFFSET]);
+ 		udelay(10);
+ 		hwif->OUTB(8, hwif->io_ports[IDE_CONTROL_OFFSET]);
+-		do {
+-			msleep(50);
+-			stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+-		} while ((stat & BUSY_STAT) && time_after(timeout, jiffies));
+-
++		(void)ide_busy_sleep(hwif);
+ 	}
+ 	local_irq_restore(flags);
+ 	/*
+@@ -814,8 +808,12 @@ static void probe_hwif(ide_hwif_t *hwif)
+ 		return;
+ 	}
+ 
+-	if (hwif->fixup)
+-		hwif->fixup(hwif);
++	for (unit = 0; unit < MAX_DRIVES; unit++) {
++		ide_drive_t *drive = &hwif->drives[unit];
++
++		if (drive->present && hwif->quirkproc)
++			hwif->quirkproc(drive);
++	}
+ 
+ 	for (unit = 0; unit < MAX_DRIVES; ++unit) {
+ 		ide_drive_t *drive = &hwif->drives[unit];
+@@ -830,16 +828,8 @@ static void probe_hwif(ide_hwif_t *hwif)
+ 
+ 			drive->nice1 = 1;
+ 
+-			if (hwif->ide_dma_on) {
+-				/*
+-				 * Force DMAing for the beginning of the check.
+-				 * Some chipsets appear to do interesting
+-				 * things, if not checked and cleared.
+-				 *   PARANOIA!!!
+-				 */
+-				hwif->dma_off_quietly(drive);
++			if (hwif->dma_host_set)
+ 				ide_set_dma(drive);
+-			}
+ 		}
+ 	}
+ 
+@@ -853,25 +843,6 @@ static void probe_hwif(ide_hwif_t *hwif)
+ 	}
+ }
+ 
+-static int hwif_init(ide_hwif_t *hwif);
+-static void hwif_register_devices(ide_hwif_t *hwif);
+-
+-static int probe_hwif_init(ide_hwif_t *hwif)
+-{
+-	probe_hwif(hwif);
+-
+-	if (!hwif_init(hwif)) {
+-		printk(KERN_INFO "%s: failed to initialize IDE interface\n",
+-				 hwif->name);
+-		return -1;
+-	}
+-
+-	if (hwif->present)
+-		hwif_register_devices(hwif);
+-
+-	return 0;
+-}
+-
+ #if MAX_HWIFS > 1
+ /*
+  * save_match() is used to simplify logic in init_irq() below.
+@@ -968,11 +939,6 @@ static int ide_init_queue(ide_drive_t *drive)
+  * Much of the code is for correctly detecting/handling irq sharing
+  * and irq serialization situations.  This is somewhat complex because
+  * it handles static as well as dynamic (PCMCIA) IDE interfaces.
+- *
+- * The IRQF_DISABLED in sa_flags means ide_intr() is always entered with
+- * interrupts completely disabled.  This can be bad for interrupt latency,
+- * but anything else has led to problems on some machines.  We re-enable
+- * interrupts as much as we can safely do in most places.
+  */
+ static int init_irq (ide_hwif_t *hwif)
+ {
+@@ -1055,17 +1021,13 @@ static int init_irq (ide_hwif_t *hwif)
+ 	 * Allocate the irq, if not already obtained for another hwif
+ 	 */
+ 	if (!match || match->irq != hwif->irq) {
+-		int sa = IRQF_DISABLED;
++		int sa = 0;
+ #if defined(__mc68000__) || defined(CONFIG_APUS)
+ 		sa = IRQF_SHARED;
+ #endif /* __mc68000__ || CONFIG_APUS */
+ 
 -		if (IDE_CHIPSET_IS_PCI(hwif->chipset)) {
 +		if (IDE_CHIPSET_IS_PCI(hwif->chipset))
  			sa = IRQF_SHARED;
@@ -55560,7 +59336,7 @@
  
  		if (hwif->io_ports[IDE_CONTROL_OFFSET])
  			/* clear nIEN */
-@@ -1173,7 +1159,7 @@ static struct kobject *exact_match(dev_t dev, int *part, void *data)
+@@ -1173,7 +1135,7 @@ static struct kobject *exact_match(dev_t dev, int *part, void *data)
  {
  	struct gendisk *p = data;
  	*part &= (1 << PARTN_BITS) - 1;
@@ -55569,8 +59345,262 @@
  }
  
  static int exact_lock(dev_t dev, void *data)
+@@ -1373,54 +1335,63 @@ static void hwif_register_devices(ide_hwif_t *hwif)
+ 	}
+ }
+ 
+-int ideprobe_init (void)
++int ide_device_add_all(u8 *idx)
+ {
+-	unsigned int index;
+-	int probe[MAX_HWIFS];
+-
+-	memset(probe, 0, MAX_HWIFS * sizeof(int));
+-	for (index = 0; index < MAX_HWIFS; ++index)
+-		probe[index] = !ide_hwifs[index].present;
+-
+-	for (index = 0; index < MAX_HWIFS; ++index)
+-		if (probe[index])
+-			probe_hwif(&ide_hwifs[index]);
+-	for (index = 0; index < MAX_HWIFS; ++index)
+-		if (probe[index])
+-			hwif_init(&ide_hwifs[index]);
+-	for (index = 0; index < MAX_HWIFS; ++index) {
+-		if (probe[index]) {
+-			ide_hwif_t *hwif = &ide_hwifs[index];
+-			if (!hwif->present)
+-				continue;
+-			if (hwif->chipset == ide_unknown || hwif->chipset == ide_forced)
+-				hwif->chipset = ide_generic;
+-			hwif_register_devices(hwif);
++	ide_hwif_t *hwif;
++	int i, rc = 0;
++
++	for (i = 0; i < MAX_HWIFS; i++) {
++		if (idx[i] == 0xff)
++			continue;
++
++		probe_hwif(&ide_hwifs[idx[i]]);
++	}
++
++	for (i = 0; i < MAX_HWIFS; i++) {
++		if (idx[i] == 0xff)
++			continue;
++
++		hwif = &ide_hwifs[idx[i]];
++
++		if (hwif_init(hwif) == 0) {
++			printk(KERN_INFO "%s: failed to initialize IDE "
++					 "interface\n", hwif->name);
++			rc = -1;
++			continue;
+ 		}
+ 	}
+-	for (index = 0; index < MAX_HWIFS; ++index)
+-		if (probe[index])
+-			ide_proc_register_port(&ide_hwifs[index]);
+-	return 0;
+-}
+ 
+-EXPORT_SYMBOL_GPL(ideprobe_init);
++	for (i = 0; i < MAX_HWIFS; i++) {
++		if (idx[i] == 0xff)
++			continue;
+ 
+-int ide_device_add(u8 idx[4])
+-{
+-	int i, rc = 0;
++		hwif = &ide_hwifs[idx[i]];
+ 
+-	for (i = 0; i < 4; i++) {
+-		if (idx[i] != 0xff)
+-			rc |= probe_hwif_init(&ide_hwifs[idx[i]]);
++		if (hwif->present) {
++			if (hwif->chipset == ide_unknown ||
++			    hwif->chipset == ide_forced)
++				hwif->chipset = ide_generic;
++			hwif_register_devices(hwif);
++		}
+ 	}
+ 
+-	for (i = 0; i < 4; i++) {
++	for (i = 0; i < MAX_HWIFS; i++) {
+ 		if (idx[i] != 0xff)
+ 			ide_proc_register_port(&ide_hwifs[idx[i]]);
+ 	}
+ 
+ 	return rc;
+ }
++EXPORT_SYMBOL_GPL(ide_device_add_all);
++
++int ide_device_add(u8 idx[4])
++{
++	u8 idx_all[MAX_HWIFS];
++	int i;
+ 
++	for (i = 0; i < MAX_HWIFS; i++)
++		idx_all[i] = (i < 4) ? idx[i] : 0xff;
++
++	return ide_device_add_all(idx_all);
++}
+ EXPORT_SYMBOL_GPL(ide_device_add);
+diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
+index a4007d3..aa663e7 100644
+--- a/drivers/ide/ide-proc.c
++++ b/drivers/ide/ide-proc.c
+@@ -346,14 +346,20 @@ static int ide_write_setting(ide_drive_t *drive, ide_settings_t *setting, int va
+ 
+ static int set_xfer_rate (ide_drive_t *drive, int arg)
+ {
++	ide_task_t task;
+ 	int err;
+ 
+ 	if (arg < 0 || arg > 70)
+ 		return -EINVAL;
+ 
+-	err = ide_wait_cmd(drive,
+-			WIN_SETFEATURES, (u8) arg,
+-			SETFEATURES_XFER, 0, NULL);
++	memset(&task, 0, sizeof(task));
++	task.tf.command = WIN_SETFEATURES;
++	task.tf.feature = SETFEATURES_XFER;
++	task.tf.nsect   = (u8)arg;
++	task.tf_flags = IDE_TFLAG_OUT_FEATURE | IDE_TFLAG_OUT_NSECT |
++			IDE_TFLAG_IN_NSECT;
++
++	err = ide_no_data_taskfile(drive, &task);
+ 
+ 	if (!err && arg) {
+ 		ide_set_xfer_rate(drive, (u8) arg);
+diff --git a/drivers/ide/ide-scan-pci.c b/drivers/ide/ide-scan-pci.c
+new file mode 100644
+index 0000000..7ffa332
+--- /dev/null
++++ b/drivers/ide/ide-scan-pci.c
+@@ -0,0 +1,121 @@
++/*
++ * support for probing IDE PCI devices in the PCI bus order
++ *
++ * Copyright (c) 1998-2000  Andre Hedrick <andre at linux-ide.org>
++ * Copyright (c) 1995-1998  Mark Lord
++ *
++ * May be copied or modified under the terms of the GNU General Public License
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/ide.h>
++
++/*
++ *	Module interfaces
++ */
++
++static int pre_init = 1;		/* Before first ordered IDE scan */
++static LIST_HEAD(ide_pci_drivers);
++
++/*
++ *	__ide_pci_register_driver	-	attach IDE driver
++ *	@driver: pci driver
++ *	@module: owner module of the driver
++ *
++ *	Registers a driver with the IDE layer. The IDE layer arranges that
++ *	boot time setup is done in the expected device order and then
++ *	hands the controllers off to the core PCI code to do the rest of
++ *	the work.
++ *
++ *	Returns are the same as for pci_register_driver
++ */
++
++int __ide_pci_register_driver(struct pci_driver *driver, struct module *module,
++			      const char *mod_name)
++{
++	if (!pre_init)
++		return __pci_register_driver(driver, module, mod_name);
++	driver->driver.owner = module;
++	list_add_tail(&driver->node, &ide_pci_drivers);
++	return 0;
++}
++EXPORT_SYMBOL_GPL(__ide_pci_register_driver);
++
++/**
++ *	ide_scan_pcidev		-	find an IDE driver for a device
++ *	@dev: PCI device to check
++ *
++ *	Look for an IDE driver to handle the device we are considering.
++ *	This is only used during boot up to get the ordering correct. After
++ *	boot up the pci layer takes over the job.
++ */
++
++static int __init ide_scan_pcidev(struct pci_dev *dev)
++{
++	struct list_head *l;
++	struct pci_driver *d;
++
++	list_for_each(l, &ide_pci_drivers) {
++		d = list_entry(l, struct pci_driver, node);
++		if (d->id_table) {
++			const struct pci_device_id *id =
++				pci_match_id(d->id_table, dev);
++
++			if (id != NULL && d->probe(dev, id) >= 0) {
++				dev->driver = d;
++				pci_dev_get(dev);
++				return 1;
++			}
++		}
++	}
++	return 0;
++}
++
++/**
++ *	ide_scan_pcibus		-	perform the initial IDE driver scan
++ *
++ *	Perform the initial bus rather than driver ordered scan of the
++ *	PCI drivers. After this all IDE pci handling becomes standard
++ *	module ordering not traditionally ordered.
++ */
++
++int __init ide_scan_pcibus(void)
++{
++	struct pci_dev *dev = NULL;
++	struct pci_driver *d;
++	struct list_head *l, *n;
++
++	pre_init = 0;
++	if (!ide_scan_direction)
++		while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)))
++			ide_scan_pcidev(dev);
++	else
++		while ((dev = pci_get_device_reverse(PCI_ANY_ID, PCI_ANY_ID,
++						     dev)))
++			ide_scan_pcidev(dev);
++
++	/*
++	 *	Hand the drivers over to the PCI layer now we
++	 *	are post init.
++	 */
++
++	list_for_each_safe(l, n, &ide_pci_drivers) {
++		list_del(l);
++		d = list_entry(l, struct pci_driver, node);
++		if (__pci_register_driver(d, d->driver.owner,
++					  d->driver.mod_name))
++			printk(KERN_ERR "%s: failed to register %s driver\n",
++					__FUNCTION__, d->driver.mod_name);
++	}
++
++	return 0;
++}
++
++static int __init ide_scan_pci(void)
++{
++	return ide_scan_pcibus();
++}
++
++module_init(ide_scan_pci);
 diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
-index 7b9181b..3cbca3f 100644
+index 7b9181b..d71a584 100644
 --- a/drivers/ide/ide-tape.c
 +++ b/drivers/ide/ide-tape.c
 @@ -615,16 +615,6 @@ typedef struct os_dat_s {
@@ -55590,7 +59620,19 @@
   *	Read/Write error simulation
   */
  #define SIMULATE_ERRORS			0
-@@ -1818,9 +1808,8 @@ static ide_startstop_t idetape_retry_pc (ide_drive_t *drive)
+@@ -1700,6 +1690,11 @@ static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
+ 	if (error)
+ 		tape->failed_pc = NULL;
+ 
++	if (!blk_special_request(rq)) {
++		ide_end_request(drive, uptodate, nr_sects);
++		return 0;
++	}
++
+ 	spin_lock_irqsave(&tape->spinlock, flags);
+ 
+ 	/* The request was a pipelined data transfer request */
+@@ -1818,9 +1813,8 @@ static ide_startstop_t idetape_retry_pc (ide_drive_t *drive)
  	idetape_tape_t *tape = drive->driver_data;
  	idetape_pc_t *pc;
  	struct request *rq;
@@ -55601,7 +59643,7 @@
  	pc = idetape_next_pc_storage(drive);
  	rq = idetape_next_rq_storage(drive);
  	idetape_create_request_sense_cmd(pc);
-@@ -1858,15 +1847,13 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
+@@ -1858,15 +1852,13 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
  {
  	ide_hwif_t *hwif = drive->hwif;
  	idetape_tape_t *tape = drive->driver_data;
@@ -55619,7 +59661,7 @@
  
  #if IDETAPE_DEBUG_LOG
  	if (tape->debug_level >= 4)
-@@ -1875,10 +1862,10 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
+@@ -1875,10 +1867,10 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
  #endif /* IDETAPE_DEBUG_LOG */	
  
  	/* Clear the interrupt */
@@ -55632,7 +59674,7 @@
  			/*
  			 * A DMA error is sometimes expected. For example,
  			 * if the tape is crossing a filemark during a
-@@ -1912,7 +1899,7 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
+@@ -1912,7 +1904,7 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
  	}
  
  	/* No more interrupts */
@@ -55641,7 +59683,7 @@
  #if IDETAPE_DEBUG_LOG
  		if (tape->debug_level >= 2)
  			printk(KERN_INFO "ide-tape: Packet command completed, %d bytes transferred\n", pc->actually_transferred);
-@@ -1927,12 +1914,13 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
+@@ -1927,12 +1919,13 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
  		    (++error_sim_count % 100) == 0) {
  			printk(KERN_INFO "ide-tape: %s: simulating error\n",
  				tape->name);
@@ -55659,7 +59701,7 @@
  #if IDETAPE_DEBUG_LOG
  			if (tape->debug_level >= 1)
  				printk(KERN_INFO "ide-tape: %s: I/O error\n",
-@@ -1951,7 +1939,7 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
+@@ -1951,7 +1944,7 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
  		}
  		pc->error = 0;
  		if (test_bit(PC_WAIT_FOR_DSC, &pc->flags) &&
@@ -55668,7 +59710,7 @@
  			/* Media access command */
  			tape->dsc_polling_start = jiffies;
  			tape->dsc_polling_frequency = IDETAPE_DSC_MA_FAST;
-@@ -1973,30 +1961,30 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
+@@ -1973,30 +1966,30 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
  		return ide_do_reset(drive);
  	}
  	/* Get the number of bytes to transfer on this interrupt. */
@@ -55708,7 +59750,7 @@
  				ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
  				return ide_started;
  			}
-@@ -2008,23 +1996,26 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
+@@ -2008,23 +2001,26 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
  	}
  	if (test_bit(PC_WRITING, &pc->flags)) {
  		if (pc->bh != NULL)
@@ -55742,7 +59784,7 @@
  #endif
  	/* And set the interrupt handler again */
  	ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
-@@ -2078,28 +2069,28 @@ static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
+@@ -2078,28 +2074,28 @@ static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
  	ide_hwif_t *hwif = drive->hwif;
  	idetape_tape_t *tape = drive->driver_data;
  	idetape_pc_t *pc = tape->pc;
@@ -55778,7 +59820,7 @@
  		printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while issuing "
  				"a packet command\n");
  		return ide_do_reset(drive);
-@@ -2120,8 +2111,8 @@ static ide_startstop_t idetape_issue_packet_command (ide_drive_t *drive, idetape
+@@ -2120,8 +2116,8 @@ static ide_startstop_t idetape_issue_packet_command (ide_drive_t *drive, idetape
  {
  	ide_hwif_t *hwif = drive->hwif;
  	idetape_tape_t *tape = drive->driver_data;
@@ -55788,7 +59830,7 @@
  
  #if IDETAPE_DEBUG_BUGS
  	if (tape->pc->c[0] == IDETAPE_REQUEST_SENSE_CMD &&
-@@ -2170,7 +2161,7 @@ static ide_startstop_t idetape_issue_packet_command (ide_drive_t *drive, idetape
+@@ -2170,7 +2166,7 @@ static ide_startstop_t idetape_issue_packet_command (ide_drive_t *drive, idetape
  	pc->actually_transferred = 0;
  	pc->current_position = pc->buffer;
  	/* Request to transfer the entire buffer at once */
@@ -55797,7 +59839,7 @@
  
  	if (test_and_clear_bit(PC_DMA_ERROR, &pc->flags)) {
  		printk(KERN_WARNING "ide-tape: DMA disabled, "
-@@ -2180,12 +2171,9 @@ static ide_startstop_t idetape_issue_packet_command (ide_drive_t *drive, idetape
+@@ -2180,12 +2176,9 @@ static ide_startstop_t idetape_issue_packet_command (ide_drive_t *drive, idetape
  	if (test_bit(PC_DMA_RECOMMENDED, &pc->flags) && drive->using_dma)
  		dma_ok = !hwif->dma_setup(drive);
  
@@ -55813,7 +59855,7 @@
  	if (dma_ok)			/* Will begin DMA later */
  		set_bit(PC_DMA_IN_PROGRESS, &pc->flags);
  	if (test_bit(IDETAPE_DRQ_INTERRUPT, &tape->flags)) {
-@@ -2295,11 +2283,11 @@ static ide_startstop_t idetape_media_access_finished (ide_drive_t *drive)
+@@ -2295,11 +2288,11 @@ static ide_startstop_t idetape_media_access_finished (ide_drive_t *drive)
  {
  	idetape_tape_t *tape = drive->driver_data;
  	idetape_pc_t *pc = tape->pc;
@@ -55829,7 +59871,7 @@
  			/* Error detected */
  			if (pc->c[0] != IDETAPE_TEST_UNIT_READY_CMD)
  				printk(KERN_ERR "ide-tape: %s: I/O error, ",
-@@ -2417,7 +2405,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
+@@ -2417,7 +2410,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
  	idetape_tape_t *tape = drive->driver_data;
  	idetape_pc_t *pc = NULL;
  	struct request *postponed_rq = tape->postponed_rq;
@@ -55838,7 +59880,7 @@
  
  #if IDETAPE_DEBUG_LOG
  #if 0
-@@ -2465,7 +2453,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
+@@ -2465,7 +2458,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
  	 * If the tape is still busy, postpone our request and service
  	 * the other device meanwhile.
  	 */
@@ -55847,7 +59889,7 @@
  
  	if (!drive->dsc_overlap && !(rq->cmd[0] & REQ_IDETAPE_PC2))
  		set_bit(IDETAPE_IGNORE_DSC, &tape->flags);
-@@ -2481,7 +2469,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
+@@ -2481,7 +2474,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
  		tape->insert_speed = tape->insert_size / 1024 * HZ / (jiffies - tape->insert_time);
  	calculate_speeds(drive);
  	if (!test_and_clear_bit(IDETAPE_IGNORE_DSC, &tape->flags) &&
@@ -55856,7 +59898,7 @@
  		if (postponed_rq == NULL) {
  			tape->dsc_polling_start = jiffies;
  			tape->dsc_polling_frequency = tape->best_dsc_rw_frequency;
-@@ -2502,9 +2490,6 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
+@@ -2502,9 +2495,6 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
  	}
  	if (rq->cmd[0] & REQ_IDETAPE_READ) {
  		tape->buffer_head++;
@@ -55866,7 +59908,7 @@
  		tape->postpone_cnt = 0;
  		pc = idetape_next_pc_storage(drive);
  		idetape_create_read_cmd(tape, pc, rq->current_nr_sectors, (struct idetape_bh *)rq->special);
-@@ -2512,9 +2497,6 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
+@@ -2512,9 +2502,6 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
  	}
  	if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
  		tape->buffer_head++;
@@ -55876,7 +59918,7 @@
  		tape->postpone_cnt = 0;
  		pc = idetape_next_pc_storage(drive);
  		idetape_create_write_cmd(tape, pc, rq->current_nr_sectors, (struct idetape_bh *)rq->special);
-@@ -3241,9 +3223,6 @@ static int idetape_add_chrdev_write_request (ide_drive_t *drive, int blocks)
+@@ -3241,9 +3228,6 @@ static int idetape_add_chrdev_write_request (ide_drive_t *drive, int blocks)
  	idetape_switch_buffers(tape, new_stage);
  	idetape_add_stage_tail(drive, new_stage);
  	tape->pipeline_head++;
@@ -55886,7 +59928,7 @@
  	calculate_speeds(drive);
  
  	/*
-@@ -3493,9 +3472,6 @@ static int idetape_add_chrdev_read_request (ide_drive_t *drive,int blocks)
+@@ -3493,9 +3477,6 @@ static int idetape_add_chrdev_read_request (ide_drive_t *drive,int blocks)
  		idetape_remove_stage_head(drive);
  		spin_unlock_irqrestore(&tape->spinlock, flags);
  		tape->pipeline_head++;
@@ -55896,7 +59938,7 @@
  		calculate_speeds(drive);
  	}
  #if IDETAPE_DEBUG_BUGS
-@@ -4724,10 +4700,8 @@ static void ide_tape_release(struct kref *kref)
+@@ -4724,10 +4705,8 @@ static void ide_tape_release(struct kref *kref)
  
  	drive->dsc_overlap = 0;
  	drive->driver_data = NULL;
@@ -55909,7 +59951,7 @@
  	idetape_devs[tape->minor] = NULL;
  	g->private_data = NULL;
  	put_disk(g);
-@@ -4884,10 +4858,10 @@ static int ide_tape_probe(ide_drive_t *drive)
+@@ -4884,10 +4863,10 @@ static int ide_tape_probe(ide_drive_t *drive)
  
  	idetape_setup(drive, tape, minor);
  
@@ -55925,15 +59967,30 @@
  	g->fops = &idetape_block_ops;
  	ide_register_region(g);
 diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
-index 2b60f1b..2d63ea9 100644
+index 2b60f1b..5eb6fa1 100644
 --- a/drivers/ide/ide-taskfile.c
 +++ b/drivers/ide/ide-taskfile.c
-@@ -63,65 +63,78 @@ static void taskfile_output_data(ide_drive_t *drive, void *buffer, u32 wcount)
- 	}
- }
+@@ -35,93 +35,81 @@
+ #include <asm/uaccess.h>
+ #include <asm/io.h>
  
+-static void ata_bswap_data (void *buffer, int wcount)
 +void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
-+{
+ {
+-	u16 *p = buffer;
+-
+-	while (wcount--) {
+-		*p = *p << 8 | *p >> 8; p++;
+-		*p = *p << 8 | *p >> 8; p++;
+-	}
+-}
+-
+-static void taskfile_input_data(ide_drive_t *drive, void *buffer, u32 wcount)
+-{
+-	HWIF(drive)->ata_input_data(drive, buffer, wcount);
+-	if (drive->bswap)
+-		ata_bswap_data(buffer, wcount);
+-}
 +	ide_hwif_t *hwif = drive->hwif;
 +	struct ide_taskfile *tf = &task->tf;
 +	u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
@@ -55946,10 +60003,22 @@
 +		"lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
 +		drive->name, tf->feature, tf->nsect, tf->lbal,
 +		tf->lbam, tf->lbah, tf->device, tf->command);
++	printk("%s: hob: nsect 0x%02x lbal 0x%02x "
++		"lbam 0x%02x lbah 0x%02x\n",
++		drive->name, tf->hob_nsect, tf->hob_lbal,
++		tf->hob_lbam, tf->hob_lbah);
 +#endif
-+
-+	if (IDE_CONTROL_REG)
-+		hwif->OUTB(drive->ctl, IDE_CONTROL_REG); /* clear nIEN */
+ 
+-static void taskfile_output_data(ide_drive_t *drive, void *buffer, u32 wcount)
+-{
+-	if (drive->bswap) {
+-		ata_bswap_data(buffer, wcount);
+-		HWIF(drive)->ata_output_data(drive, buffer, wcount);
+-		ata_bswap_data(buffer, wcount);
+-	} else {
+-		HWIF(drive)->ata_output_data(drive, buffer, wcount);
+-	}
++	ide_set_irq(drive, 1);
 +
 +	if ((task->tf_flags & IDE_TFLAG_NO_SELECT_MASK) == 0)
 +		SELECT_MASK(drive, 0);
@@ -55981,8 +60050,8 @@
 +
 +	if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
 +		hwif->OUTB((tf->device & HIHI) | drive->select.all, IDE_SELECT_REG);
-+}
-+
+ }
+ 
  int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
  {
  	ide_task_t args;
@@ -56000,7 +60069,7 @@
 -	args.handler	  = &task_in_intr;
 -	return ide_raw_taskfile(drive, &args, buf);
 +		args.tf.command = WIN_PIDENTIFY;
-+	args.tf_flags	= IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE;
++	args.tf_flags	= IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
 +	args.data_phase	= TASKFILE_IN;
 +	return ide_raw_taskfile(drive, &args, buf, 1);
  }
@@ -56045,18 +60114,18 @@
 -		ide_execute_command(drive, taskfile->command, task->handler, WAIT_WORSTCASE, NULL);
 -		return ide_started;
 -	}
--
--	if (!drive->using_dma)
--		return ide_stopped;
 +	if (blk_fs_request(task->rq) || (task->tf_flags & IDE_TFLAG_FLAGGED))
 +		return 1;
  
+-	if (!drive->using_dma)
+-		return ide_stopped;
+-
 -	switch (taskfile->command) {
 +	switch (task->tf.command) {
  		case WIN_WRITEDMA_ONCE:
  		case WIN_WRITEDMA:
  		case WIN_WRITEDMA_EXT:
-@@ -129,24 +142,79 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
+@@ -129,24 +117,79 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
  		case WIN_READDMA:
  		case WIN_READDMA_EXT:
  		case WIN_IDENTIFY_DMA:
@@ -56074,8 +60143,8 @@
  
 -	return ide_stopped;
 +	return 0;
-+}
-+
+ }
+ 
 +static ide_startstop_t task_no_data_intr(ide_drive_t *);
 +static ide_startstop_t set_geometry_intr(ide_drive_t *);
 +static ide_startstop_t recal_intr(ide_drive_t *);
@@ -56136,9 +60205,9 @@
 +		hwif->dma_start(drive);
 +		return ide_started;
 +	}
- }
++}
 +EXPORT_SYMBOL_GPL(do_rw_taskfile);
- 
++
  /*
   * set_multmode_intr() is invoked on completion of a WIN_SETMULT cmd.
   */
@@ -56147,7 +60216,7 @@
  {
  	ide_hwif_t *hwif = HWIF(drive);
  	u8 stat;
-@@ -164,7 +232,7 @@ ide_startstop_t set_multmode_intr (ide_drive_t *drive)
+@@ -164,7 +207,7 @@ ide_startstop_t set_multmode_intr (ide_drive_t *drive)
  /*
   * set_geometry_intr() is invoked on completion of a WIN_SPECIFY cmd.
   */
@@ -56156,7 +60225,7 @@
  {
  	ide_hwif_t *hwif = HWIF(drive);
  	int retries = 5;
-@@ -187,7 +255,7 @@ ide_startstop_t set_geometry_intr (ide_drive_t *drive)
+@@ -187,7 +230,7 @@ ide_startstop_t set_geometry_intr (ide_drive_t *drive)
  /*
   * recal_intr() is invoked on completion of a WIN_RESTORE (recalibrate) cmd.
   */
@@ -56165,7 +60234,7 @@
  {
  	ide_hwif_t *hwif = HWIF(drive);
  	u8 stat;
-@@ -200,7 +268,7 @@ ide_startstop_t recal_intr (ide_drive_t *drive)
+@@ -200,7 +243,7 @@ ide_startstop_t recal_intr (ide_drive_t *drive)
  /*
   * Handler for commands without a data phase
   */
@@ -56174,25 +60243,100 @@
  {
  	ide_task_t *args	= HWGROUP(drive)->rq->special;
  	ide_hwif_t *hwif	= HWIF(drive);
-@@ -217,8 +285,6 @@ ide_startstop_t task_no_data_intr (ide_drive_t *drive)
+@@ -217,9 +260,7 @@ ide_startstop_t task_no_data_intr (ide_drive_t *drive)
  	return ide_stopped;
  }
  
 -EXPORT_SYMBOL(task_no_data_intr);
 -
- static u8 wait_drive_not_busy(ide_drive_t *drive)
+-static u8 wait_drive_not_busy(ide_drive_t *drive)
++u8 wait_drive_not_busy(ide_drive_t *drive)
  {
  	ide_hwif_t *hwif = HWIF(drive);
-@@ -363,7 +429,7 @@ static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
+ 	int retries;
+@@ -227,8 +268,7 @@ static u8 wait_drive_not_busy(ide_drive_t *drive)
+ 
+ 	/*
+ 	 * Last sector was transfered, wait until drive is ready.
+-	 * This can take up to 10 usec, but we will wait max 1 ms
+-	 * (drive_cmd_intr() waits that long).
++	 * This can take up to 10 usec, but we will wait max 1 ms.
+ 	 */
+ 	for (retries = 0; retries < 100; retries++) {
+ 		if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT)
+@@ -283,9 +323,9 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
+ 
+ 	/* do the actual data transfer */
+ 	if (write)
+-		taskfile_output_data(drive, buf, SECTOR_WORDS);
++		hwif->ata_output_data(drive, buf, SECTOR_WORDS);
+ 	else
+-		taskfile_input_data(drive, buf, SECTOR_WORDS);
++		hwif->ata_input_data(drive, buf, SECTOR_WORDS);
+ 
+ 	kunmap_atomic(buf, KM_BIO_SRC_IRQ);
+ #ifdef CONFIG_HIGHMEM
+@@ -305,9 +345,18 @@ static void ide_pio_multi(ide_drive_t *drive, unsigned int write)
+ static void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
+ 				     unsigned int write)
+ {
++	u8 saved_io_32bit = drive->io_32bit;
++
+ 	if (rq->bio)	/* fs request */
+ 		rq->errors = 0;
+ 
++	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
++		ide_task_t *task = rq->special;
++
++		if (task->tf_flags & IDE_TFLAG_IO_16BIT)
++			drive->io_32bit = 0;
++	}
++
+ 	touch_softlockup_watchdog();
+ 
+ 	switch (drive->hwif->data_phase) {
+@@ -319,6 +368,8 @@ static void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
+ 		ide_pio_sector(drive, write);
+ 		break;
+ 	}
++
++	drive->io_32bit = saved_io_32bit;
+ }
+ 
+ static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
+@@ -356,40 +407,35 @@ static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
+ 	return ide_error(drive, s, stat);
+ }
+ 
+-static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
++void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
+ {
+-	HWIF(drive)->cursg = NULL;
+-
  	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
- 		ide_task_t *task = rq->special;
+-		ide_task_t *task = rq->special;
++		u8 err = drive->hwif->INB(IDE_ERROR_REG);
  
 -		if (task->tf_out_flags.all) {
-+		if (task->tf_flags & IDE_TFLAG_FLAGGED) {
- 			u8 err = drive->hwif->INB(IDE_ERROR_REG);
- 			ide_end_drive_cmd(drive, stat, err);
- 			return;
-@@ -382,7 +448,7 @@ static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
+-			u8 err = drive->hwif->INB(IDE_ERROR_REG);
+-			ide_end_drive_cmd(drive, stat, err);
+-			return;
+-		}
++		ide_end_drive_cmd(drive, stat, err);
++		return;
+ 	}
+ 
+ 	if (rq->rq_disk) {
+ 		ide_driver_t *drv;
+ 
+ 		drv = *(ide_driver_t **)rq->rq_disk->private_data;;
+-		drv->end_request(drive, 1, rq->hard_nr_sectors);
++		drv->end_request(drive, 1, rq->nr_sectors);
+ 	} else
+-		ide_end_request(drive, 1, rq->hard_nr_sectors);
++		ide_end_request(drive, 1, rq->nr_sectors);
+ }
+ 
  /*
   * Handler for command with PIO data-in phase (Read/Read Multiple).
   */
@@ -56201,7 +60345,24 @@
  {
  	ide_hwif_t *hwif = drive->hwif;
  	struct request *rq = HWGROUP(drive)->rq;
-@@ -413,7 +479,6 @@ ide_startstop_t task_in_intr (ide_drive_t *drive)
+ 	u8 stat = hwif->INB(IDE_STATUS_REG);
+ 
+ 	/* new way for dealing with premature shared PCI interrupts */
+-	if (!OK_STAT(stat, DATA_READY, BAD_R_STAT)) {
++	if (!OK_STAT(stat, DRQ_STAT, BAD_R_STAT)) {
+ 		if (stat & (ERR_STAT | DRQ_STAT))
+ 			return task_error(drive, rq, __FUNCTION__, stat);
+ 		/* No data yet, so wait for another IRQ. */
+@@ -402,7 +448,7 @@ ide_startstop_t task_in_intr (ide_drive_t *drive)
+ 	/* If it was the last datablock check status and finish transfer. */
+ 	if (!hwif->nleft) {
+ 		stat = wait_drive_not_busy(drive);
+-		if (!OK_STAT(stat, 0, BAD_R_STAT))
++		if (!OK_STAT(stat, 0, BAD_STAT))
+ 			return task_error(drive, rq, __FUNCTION__, stat);
+ 		task_end_request(drive, rq, stat);
+ 		return ide_stopped;
+@@ -413,7 +459,6 @@ ide_startstop_t task_in_intr (ide_drive_t *drive)
  
  	return ide_started;
  }
@@ -56209,7 +60370,7 @@
  
  /*
   * Handler for command with PIO data-out phase (Write/Write Multiple).
-@@ -443,7 +508,7 @@ static ide_startstop_t task_out_intr (ide_drive_t *drive)
+@@ -443,11 +488,11 @@ static ide_startstop_t task_out_intr (ide_drive_t *drive)
  	return ide_started;
  }
  
@@ -56218,7 +60379,12 @@
  {
  	ide_startstop_t startstop;
  
-@@ -464,9 +529,8 @@ ide_startstop_t pre_task_out_intr (ide_drive_t *drive, struct request *rq)
+-	if (ide_wait_stat(&startstop, drive, DATA_READY,
++	if (ide_wait_stat(&startstop, drive, DRQ_STAT,
+ 			  drive->bad_wstat, WAIT_DRQ)) {
+ 		printk(KERN_ERR "%s: no DRQ after issuing %sWRITE%s\n",
+ 				drive->name,
+@@ -464,9 +509,8 @@ ide_startstop_t pre_task_out_intr (ide_drive_t *drive, struct request *rq)
  
  	return ide_started;
  }
@@ -56229,7 +60395,7 @@
  {
  	struct request rq;
  
-@@ -481,36 +545,27 @@ static int ide_diag_taskfile(ide_drive_t *drive, ide_task_t *args, unsigned long
+@@ -481,36 +525,27 @@ static int ide_diag_taskfile(ide_drive_t *drive, ide_task_t *args, unsigned long
  	 * if we would find a solution to transfer any size.
  	 * To support special commands like READ LONG.
  	 */
@@ -56279,7 +60445,7 @@
  
  #ifdef CONFIG_IDE_TASK_IOCTL
  int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
-@@ -519,12 +574,12 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
+@@ -519,13 +554,12 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
  	ide_task_t		args;
  	u8 *outbuf		= NULL;
  	u8 *inbuf		= NULL;
@@ -56290,11 +60456,12 @@
  	int tasksize		= sizeof(struct ide_task_request_s);
  	unsigned int taskin	= 0;
  	unsigned int taskout	= 0;
+-	u8 io_32bit		= drive->io_32bit;
 +	u16 nsect		= 0;
- 	u8 io_32bit		= drive->io_32bit;
  	char __user *buf = (char __user *)arg;
  
-@@ -572,24 +627,52 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
+ //	printk("IDE Taskfile ...\n");
+@@ -572,24 +606,52 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
  	}
  
  	memset(&args, 0, sizeof(ide_task_t));
@@ -56310,9 +60477,10 @@
 +
 +	args.data_phase = req_task->data_phase;
 +
-+	args.tf_flags = IDE_TFLAG_OUT_DEVICE;
++	args.tf_flags = IDE_TFLAG_IO_16BIT | IDE_TFLAG_DEVICE |
++			IDE_TFLAG_IN_TF;
 +	if (drive->addressing == 1)
-+		args.tf_flags |= IDE_TFLAG_LBA48;
++		args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_IN_HOB);
 +
 +	if (req_task->out_flags.all) {
 +		args.tf_flags |= IDE_TFLAG_FLAGGED;
@@ -56348,7 +60516,7 @@
 +	if (req_task->in_flags.b.data)
 +		args.tf_flags |= IDE_TFLAG_IN_DATA;
  
- 	drive->io_32bit = 0;
+-	drive->io_32bit = 0;
  	switch(req_task->data_phase) {
 -		case TASKFILE_OUT_DMAQ:
 -		case TASKFILE_OUT_DMA:
@@ -56361,7 +60529,7 @@
  		case TASKFILE_MULTI_OUT:
  			if (!drive->mult_count) {
  				/* (hs): give up if multcount is not set */
-@@ -601,9 +684,11 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
+@@ -601,9 +663,11 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
  			}
  			/* fall through */
  		case TASKFILE_OUT:
@@ -56376,7 +60544,7 @@
  			break;
  		case TASKFILE_MULTI_IN:
  			if (!drive->mult_count) {
-@@ -616,22 +701,46 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
+@@ -616,22 +680,46 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
  			}
  			/* fall through */
  		case TASKFILE_IN:
@@ -56431,15 +60599,55 @@
  
  	if (copy_to_user(buf, req_task, tasksize)) {
  		err = -EFAULT;
-@@ -688,6 +797,7 @@ int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
- 	u8 xfer_rate = 0;
- 	int argsize = 4;
+@@ -658,40 +746,24 @@ abort:
+ 
+ //	printk("IDE Taskfile ioctl ended. rc = %i\n", err);
+ 
+-	drive->io_32bit = io_32bit;
+-
+ 	return err;
+ }
+ #endif
+ 
+-int ide_wait_cmd (ide_drive_t *drive, u8 cmd, u8 nsect, u8 feature, u8 sectors, u8 *buf)
+-{
+-	struct request rq;
+-	u8 buffer[4];
+-
+-	if (!buf)
+-		buf = buffer;
+-	memset(buf, 0, 4 + SECTOR_WORDS * 4 * sectors);
+-	ide_init_drive_cmd(&rq);
+-	rq.buffer = buf;
+-	*buf++ = cmd;
+-	*buf++ = nsect;
+-	*buf++ = feature;
+-	*buf++ = sectors;
+-	return ide_do_drive_cmd(drive, &rq, ide_wait);
+-}
+-
+ int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
+ {
+-	int err = 0;
+-	u8 args[4], *argbuf = args;
+-	u8 xfer_rate = 0;
+-	int argsize = 4;
++	u8 *buf = NULL;
++	int bufsize = 0, err = 0;
++	u8 args[4], xfer_rate = 0;
  	ide_task_t tfargs;
 +	struct ide_taskfile *tf = &tfargs.tf;
  
  	if (NULL == (void *) arg) {
  		struct request rq;
-@@ -699,13 +809,10 @@ int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
++
+ 		ide_init_drive_cmd(&rq);
++		rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
++
+ 		return ide_do_drive_cmd(drive, &rq, ide_wait);
+ 	}
+ 
+@@ -699,27 +771,40 @@ int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
  		return -EFAULT;
  
  	memset(&tfargs, 0, sizeof(ide_task_t));
@@ -56451,13 +60659,60 @@
 -	tfargs.tfRegister[IDE_SELECT_OFFSET]  = 0x00;
 -	tfargs.tfRegister[IDE_COMMAND_OFFSET] = args[0];
 +	tf->feature = args[2];
-+	tf->nsect   = args[3];
-+	tf->lbal    = args[1];
++	if (args[0] == WIN_SMART) {
++		tf->nsect = args[3];
++		tf->lbal  = args[1];
++		tf->lbam  = 0x4f;
++		tf->lbah  = 0xc2;
++		tfargs.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_IN_NSECT;
++	} else {
++		tf->nsect = args[1];
++		tfargs.tf_flags = IDE_TFLAG_OUT_FEATURE |
++				  IDE_TFLAG_OUT_NSECT | IDE_TFLAG_IN_NSECT;
++	}
 +	tf->command = args[0];
++	tfargs.data_phase = args[3] ? TASKFILE_IN : TASKFILE_NO_DATA;
  
  	if (args[3]) {
- 		argsize = 4 + (SECTOR_WORDS * 4 * args[3]);
-@@ -734,135 +841,28 @@ abort:
+-		argsize = 4 + (SECTOR_WORDS * 4 * args[3]);
+-		argbuf = kzalloc(argsize, GFP_KERNEL);
+-		if (argbuf == NULL)
++		tfargs.tf_flags |= IDE_TFLAG_IO_16BIT;
++		bufsize = SECTOR_WORDS * 4 * args[3];
++		buf = kzalloc(bufsize, GFP_KERNEL);
++		if (buf == NULL)
+ 			return -ENOMEM;
+ 	}
++
+ 	if (set_transfer(drive, &tfargs)) {
+ 		xfer_rate = args[1];
+ 		if (ide_ata66_check(drive, &tfargs))
+ 			goto abort;
+ 	}
+ 
+-	err = ide_wait_cmd(drive, args[0], args[1], args[2], args[3], argbuf);
++	err = ide_raw_taskfile(drive, &tfargs, buf, args[3]);
++
++	args[0] = tf->status;
++	args[1] = tf->error;
++	args[2] = tf->nsect;
+ 
+ 	if (!err && xfer_rate) {
+ 		/* active-retuning-calls future */
+@@ -727,142 +812,38 @@ int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
+ 		ide_driveid_update(drive);
+ 	}
+ abort:
+-	if (copy_to_user((void __user *)arg, argbuf, argsize))
++	if (copy_to_user((void __user *)arg, &args, 4))
+ 		err = -EFAULT;
+-	if (argsize > 4)
+-		kfree(argbuf);
++	if (buf) {
++		if (copy_to_user((void __user *)(arg + 4), buf, bufsize))
++			err = -EFAULT;
++		kfree(buf);
++	}
  	return err;
  }
  
@@ -56506,7 +60761,7 @@
 -			return ide_stopped;
 -		}
 -	}
--
+ 
 -	/*
 -	 * (ks) Check taskfile in flags.
 -	 * If set, then execute as it is defined.
@@ -56564,6 +60819,10 @@
 -	 */
 -	hwif->OUTB(taskfile->device_head | drive->select.all, IDE_SELECT_REG);
 -	switch(task->data_phase) {
++	memset(&task, 0, sizeof(task));
++	memcpy(&task.tf_array[7], &args[1], 6);
++	task.tf.command = args[0];
++	task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
  
 -   	        case TASKFILE_OUT_DMAQ:
 -		case TASKFILE_OUT_DMA:
@@ -56571,10 +60830,7 @@
 -		case TASKFILE_IN_DMA:
 -			if (!drive->using_dma)
 -				break;
-+	memset(&task, 0, sizeof(task));
-+	memcpy(&task.tf_array[7], &args[1], 6);
-+	task.tf.command = args[0];
-+	task.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE;
++	err = ide_no_data_taskfile(drive, &task);
  
 -			if (!hwif->dma_setup(drive)) {
 -				hwif->dma_exec_cmd(drive, taskfile->command);
@@ -56582,14 +60838,13 @@
 -				return ide_started;
 -			}
 -			break;
-+	err = ide_no_data_taskfile(drive, &task);
++	args[0] = task.tf.command;
++	memcpy(&args[1], &task.tf_array[7], 6);
  
 -	        default:
 - 			if (task->handler == NULL)
 -				return ide_stopped;
-+	args[0] = task.tf.command;
-+	memcpy(&args[1], &task.tf_array[7], 6);
- 
+-
 -			/* Issue the command */
 -			if (task->prehandler) {
 -				hwif->OUTBSYNC(drive, taskfile->command, IDE_COMMAND_REG);
@@ -56606,10 +60861,72 @@
 +	return err;
  }
 diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
-index 54943da..c6d4f63 100644
+index 54943da..97894ab 100644
 --- a/drivers/ide/ide.c
 +++ b/drivers/ide/ide.c
-@@ -424,7 +424,6 @@ static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif)
+@@ -95,7 +95,7 @@ DEFINE_MUTEX(ide_cfg_mtx);
+  __cacheline_aligned_in_smp DEFINE_SPINLOCK(ide_lock);
+ 
+ #ifdef CONFIG_IDEPCI_PCIBUS_ORDER
+-static int ide_scan_direction; /* THIS was formerly 2.2.x pci=reverse */
++int ide_scan_direction; /* THIS was formerly 2.2.x pci=reverse */
+ #endif
+ 
+ int noautodma = 0;
+@@ -116,7 +116,7 @@ EXPORT_SYMBOL(ide_hwifs);
+ /*
+  * Do not even *think* about calling this!
+  */
+-static void init_hwif_data(ide_hwif_t *hwif, unsigned int index)
++void ide_init_port_data(ide_hwif_t *hwif, unsigned int index)
+ {
+ 	unsigned int unit;
+ 
+@@ -159,6 +159,7 @@ static void init_hwif_data(ide_hwif_t *hwif, unsigned int index)
+ 		init_completion(&drive->gendev_rel_comp);
+ 	}
+ }
++EXPORT_SYMBOL_GPL(ide_init_port_data);
+ 
+ static void init_hwif_default(ide_hwif_t *hwif, unsigned int index)
+ {
+@@ -177,8 +178,6 @@ static void init_hwif_default(ide_hwif_t *hwif, unsigned int index)
+ #endif
+ }
+ 
+-extern void ide_arm_init(void);
+-
+ /*
+  * init_ide_data() sets reasonable default values into all fields
+  * of all instances of the hwifs and drives, but only on the first call.
+@@ -210,16 +209,13 @@ static void __init init_ide_data (void)
+ 	/* Initialise all interface structures */
+ 	for (index = 0; index < MAX_HWIFS; ++index) {
+ 		hwif = &ide_hwifs[index];
+-		init_hwif_data(hwif, index);
++		ide_init_port_data(hwif, index);
+ 		init_hwif_default(hwif, index);
+ #if !defined(CONFIG_PPC32) || !defined(CONFIG_PCI)
+ 		hwif->irq =
+ 			ide_init_default_irq(hwif->io_ports[IDE_DATA_OFFSET]);
+ #endif
+ 	}
+-#ifdef CONFIG_IDE_ARM
+-	ide_arm_init();
+-#endif
+ }
+ 
+ /**
+@@ -414,8 +410,6 @@ static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif)
+ 	hwif->cds			= tmp_hwif->cds;
+ #endif
+ 
+-	hwif->fixup			= tmp_hwif->fixup;
+-
+ 	hwif->set_pio_mode		= tmp_hwif->set_pio_mode;
+ 	hwif->set_dma_mode		= tmp_hwif->set_dma_mode;
+ 	hwif->mdma_filter		= tmp_hwif->mdma_filter;
+@@ -424,7 +418,6 @@ static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif)
  	hwif->reset_poll		= tmp_hwif->reset_poll;
  	hwif->pre_reset			= tmp_hwif->pre_reset;
  	hwif->resetproc			= tmp_hwif->resetproc;
@@ -56617,7 +60934,25 @@
  	hwif->maskproc			= tmp_hwif->maskproc;
  	hwif->quirkproc			= tmp_hwif->quirkproc;
  	hwif->busproc			= tmp_hwif->busproc;
-@@ -468,7 +467,6 @@ static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif)
+@@ -434,16 +427,13 @@ static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif)
+ 	hwif->atapi_input_bytes		= tmp_hwif->atapi_input_bytes;
+ 	hwif->atapi_output_bytes	= tmp_hwif->atapi_output_bytes;
+ 
++	hwif->dma_host_set		= tmp_hwif->dma_host_set;
+ 	hwif->dma_setup			= tmp_hwif->dma_setup;
+ 	hwif->dma_exec_cmd		= tmp_hwif->dma_exec_cmd;
+ 	hwif->dma_start			= tmp_hwif->dma_start;
+ 	hwif->ide_dma_end		= tmp_hwif->ide_dma_end;
+-	hwif->ide_dma_on		= tmp_hwif->ide_dma_on;
+-	hwif->dma_off_quietly		= tmp_hwif->dma_off_quietly;
+ 	hwif->ide_dma_test_irq		= tmp_hwif->ide_dma_test_irq;
+ 	hwif->ide_dma_clear_irq		= tmp_hwif->ide_dma_clear_irq;
+-	hwif->dma_host_on		= tmp_hwif->dma_host_on;
+-	hwif->dma_host_off		= tmp_hwif->dma_host_off;
+ 	hwif->dma_lost_irq		= tmp_hwif->dma_lost_irq;
+ 	hwif->dma_timeout		= tmp_hwif->dma_timeout;
+ 
+@@ -468,7 +458,6 @@ static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif)
  #endif
  
  	hwif->dma_base			= tmp_hwif->dma_base;
@@ -56625,7 +60960,7 @@
  	hwif->dma_command		= tmp_hwif->dma_command;
  	hwif->dma_vendor1		= tmp_hwif->dma_vendor1;
  	hwif->dma_status		= tmp_hwif->dma_status;
-@@ -602,7 +600,6 @@ void ide_unregister(unsigned int index)
+@@ -602,7 +591,6 @@ void ide_unregister(unsigned int index)
  		(void) ide_release_dma(hwif);
  
  		hwif->dma_base = 0;
@@ -56633,7 +60968,112 @@
  		hwif->dma_command = 0;
  		hwif->dma_vendor1 = 0;
  		hwif->dma_status = 0;
-@@ -854,8 +851,7 @@ int set_using_dma(ide_drive_t *drive, int arg)
+@@ -617,7 +605,7 @@ void ide_unregister(unsigned int index)
+ 	tmp_hwif = *hwif;
+ 
+ 	/* restore hwif data to pristine status */
+-	init_hwif_data(hwif, index);
++	ide_init_port_data(hwif, index);
+ 	init_hwif_default(hwif, index);
+ 
+ 	ide_hwif_restore(hwif, &tmp_hwif);
+@@ -683,24 +671,34 @@ void ide_setup_ports (	hw_regs_t *hw,
+  */
+ }
+ 
++void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw)
++{
++	memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports));
++	hwif->irq = hw->irq;
++	hwif->noprobe = 0;
++	hwif->chipset = hw->chipset;
++	hwif->gendev.parent = hw->dev;
++	hwif->ack_intr = hw->ack_intr;
++}
++EXPORT_SYMBOL_GPL(ide_init_port_hw);
++
+ /**
+  *	ide_register_hw		-	register IDE interface
+  *	@hw: hardware registers
+- *	@fixup: fixup function
+- *	@initializing: set while initializing built-in drivers
++ *	@quirkproc: quirkproc function
+  *	@hwifp: pointer to returned hwif
+  *
+  *	Register an IDE interface, specifying exactly the registers etc.
+- *	Set init=1 iff calling before probes have taken place.
+  *
+  *	Returns -1 on error.
+  */
+ 
+-int ide_register_hw(hw_regs_t *hw, void (*fixup)(ide_hwif_t *),
+-		    int initializing, ide_hwif_t **hwifp)
++int ide_register_hw(hw_regs_t *hw, void (*quirkproc)(ide_drive_t *),
++		    ide_hwif_t **hwifp)
+ {
+ 	int index, retry = 1;
+ 	ide_hwif_t *hwif;
++	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ 
+ 	do {
+ 		for (index = 0; index < MAX_HWIFS; ++index) {
+@@ -712,8 +710,7 @@ int ide_register_hw(hw_regs_t *hw, void (*fixup)(ide_hwif_t *),
+ 			hwif = &ide_hwifs[index];
+ 			if (hwif->hold)
+ 				continue;
+-			if ((!hwif->present && !hwif->mate && !initializing) ||
+-			    (!hwif->io_ports[IDE_DATA_OFFSET] && initializing))
++			if (!hwif->present && hwif->mate == NULL)
+ 				goto found;
+ 		}
+ 		for (index = 0; index < MAX_HWIFS; index++)
+@@ -724,29 +721,23 @@ found:
+ 	if (hwif->present)
+ 		ide_unregister(index);
+ 	else if (!hwif->hold) {
+-		init_hwif_data(hwif, index);
++		ide_init_port_data(hwif, index);
+ 		init_hwif_default(hwif, index);
+ 	}
+ 	if (hwif->present)
+ 		return -1;
+-	memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports));
+-	hwif->irq = hw->irq;
+-	hwif->noprobe = 0;
+-	hwif->fixup = fixup;
+-	hwif->chipset = hw->chipset;
+-	hwif->gendev.parent = hw->dev;
+-	hwif->ack_intr = hw->ack_intr;
+ 
+-	if (initializing == 0) {
+-		u8 idx[4] = { index, 0xff, 0xff, 0xff };
++	ide_init_port_hw(hwif, hw);
++	hwif->quirkproc = quirkproc;
+ 
+-		ide_device_add(idx);
+-	}
++	idx[0] = index;
++
++	ide_device_add(idx);
+ 
+ 	if (hwifp)
+ 		*hwifp = hwif;
+ 
+-	return (initializing || hwif->present) ? index : -1;
++	return hwif->present ? index : -1;
+ }
+ 
+ EXPORT_SYMBOL(ide_register_hw);
+@@ -839,7 +830,7 @@ int set_using_dma(ide_drive_t *drive, int arg)
+ 	if (!drive->id || !(drive->id->capability & 1))
+ 		goto out;
+ 
+-	if (hwif->ide_dma_on == NULL)
++	if (hwif->dma_host_set == NULL)
+ 		goto out;
+ 
+ 	err = -EBUSY;
+@@ -854,8 +845,7 @@ int set_using_dma(ide_drive_t *drive, int arg)
  	err = 0;
  
  	if (arg) {
@@ -56643,8 +61083,827 @@
  			err = -EIO;
  	} else
  		ide_dma_off(drive);
+@@ -888,7 +878,10 @@ int set_pio_mode(ide_drive_t *drive, int arg)
+ 
+ 	if (drive->special.b.set_tune)
+ 		return -EBUSY;
++
+ 	ide_init_drive_cmd(&rq);
++	rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
++
+ 	drive->tune_req = (u8) arg;
+ 	drive->special.b.set_tune = 1;
+ 	(void) ide_do_drive_cmd(drive, &rq, ide_wait);
+@@ -1070,7 +1063,7 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device
+ 			ide_init_hwif_ports(&hw, (unsigned long) args[0],
+ 					    (unsigned long) args[1], NULL);
+ 			hw.irq = args[2];
+-			if (ide_register_hw(&hw, NULL, 0, NULL) == -1)
++			if (ide_register_hw(&hw, NULL, NULL) == -1)
+ 				return -EIO;
+ 			return 0;
+ 		}
+@@ -1231,26 +1224,12 @@ static int __init match_parm (char *s, const char *keywords[], int vals[], int m
+ 	return 0;	/* zero = nothing matched */
+ }
+ 
+-#ifdef CONFIG_BLK_DEV_ALI14XX
+ extern int probe_ali14xx;
+-extern int ali14xx_init(void);
+-#endif
+-#ifdef CONFIG_BLK_DEV_UMC8672
+ extern int probe_umc8672;
+-extern int umc8672_init(void);
+-#endif
+-#ifdef CONFIG_BLK_DEV_DTC2278
+ extern int probe_dtc2278;
+-extern int dtc2278_init(void);
+-#endif
+-#ifdef CONFIG_BLK_DEV_HT6560B
+ extern int probe_ht6560b;
+-extern int ht6560b_init(void);
+-#endif
+-#ifdef CONFIG_BLK_DEV_QD65XX
+ extern int probe_qd65xx;
+-extern int qd65xx_init(void);
+-#endif
++extern int cmd640_vlb;
+ 
+ static int __initdata is_chipset_set[MAX_HWIFS];
+ 
+@@ -1327,7 +1306,7 @@ static int __init ide_setup(char *s)
+ 	if (s[0] == 'h' && s[1] == 'd' && s[2] >= 'a' && s[2] <= max_drive) {
+ 		const char *hd_words[] = {
+ 			"none", "noprobe", "nowerr", "cdrom", "nodma",
+-			"autotune", "noautotune", "minus8", "swapdata", "bswap",
++			"autotune", "noautotune", "-8", "-9", "-10",
+ 			"noflush", "remap", "remap63", "scsi", NULL };
+ 		unit = s[2] - 'a';
+ 		hw   = unit / MAX_DRIVES;
+@@ -1363,10 +1342,6 @@ static int __init ide_setup(char *s)
+ 			case -7: /* "noautotune" */
+ 				drive->autotune = IDE_TUNE_NOAUTO;
+ 				goto obsolete_option;
+-			case -9: /* "swapdata" */
+-			case -10: /* "bswap" */
+-				drive->bswap = 1;
+-				goto done;
+ 			case -11: /* noflush */
+ 				drive->noflush = 1;
+ 				goto done;
+@@ -1466,11 +1441,8 @@ static int __init ide_setup(char *s)
+ #endif
+ #ifdef CONFIG_BLK_DEV_CMD640
+ 			case -14: /* "cmd640_vlb" */
+-			{
+-				extern int cmd640_vlb; /* flag for cmd640.c */
+ 				cmd640_vlb = 1;
+ 				goto done;
+-			}
+ #endif
+ #ifdef CONFIG_BLK_DEV_HT6560B
+ 			case -13: /* "ht6560b" */
+@@ -1560,79 +1532,6 @@ done:
+ 	return 1;
+ }
+ 
+-extern void __init pnpide_init(void);
+-extern void __exit pnpide_exit(void);
+-extern void __init h8300_ide_init(void);
+-
+-/*
+- * probe_for_hwifs() finds/initializes "known" IDE interfaces
+- */
+-static void __init probe_for_hwifs (void)
+-{
+-#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
+-	ide_scan_pcibus(ide_scan_direction);
+-#endif
+-
+-#ifdef CONFIG_ETRAX_IDE
+-	{
+-		extern void init_e100_ide(void);
+-		init_e100_ide();
+-	}
+-#endif /* CONFIG_ETRAX_IDE */
+-#ifdef CONFIG_BLK_DEV_CMD640
+-	{
+-		extern void ide_probe_for_cmd640x(void);
+-		ide_probe_for_cmd640x();
+-	}
+-#endif /* CONFIG_BLK_DEV_CMD640 */
+-#ifdef CONFIG_BLK_DEV_IDE_PMAC
+-	{
+-		extern int pmac_ide_probe(void);
+-		(void)pmac_ide_probe();
+-	}
+-#endif /* CONFIG_BLK_DEV_IDE_PMAC */
+-#ifdef CONFIG_BLK_DEV_GAYLE
+-	{
+-		extern void gayle_init(void);
+-		gayle_init();
+-	}
+-#endif /* CONFIG_BLK_DEV_GAYLE */
+-#ifdef CONFIG_BLK_DEV_FALCON_IDE
+-	{
+-		extern void falconide_init(void);
+-		falconide_init();
+-	}
+-#endif /* CONFIG_BLK_DEV_FALCON_IDE */
+-#ifdef CONFIG_BLK_DEV_MAC_IDE
+-	{
+-		extern void macide_init(void);
+-		macide_init();
+-	}
+-#endif /* CONFIG_BLK_DEV_MAC_IDE */
+-#ifdef CONFIG_BLK_DEV_Q40IDE
+-	{
+-		extern void q40ide_init(void);
+-		q40ide_init();
+-	}
+-#endif /* CONFIG_BLK_DEV_Q40IDE */
+-#ifdef CONFIG_BLK_DEV_BUDDHA
+-	{
+-		extern void buddha_init(void);
+-		buddha_init();
+-	}
+-#endif /* CONFIG_BLK_DEV_BUDDHA */
+-#ifdef CONFIG_BLK_DEV_IDEPNP
+-	pnpide_init();
+-#endif
+-#ifdef CONFIG_H8300
+-	h8300_ide_init();
+-#endif
+-}
+-
+-/*
+- * Probe module
+- */
+-
+ EXPORT_SYMBOL(ide_lock);
+ 
+ static int ide_bus_match(struct device *dev, struct device_driver *drv)
+@@ -1779,30 +1678,6 @@ static int __init ide_init(void)
+ 
+ 	proc_ide_create();
+ 
+-#ifdef CONFIG_BLK_DEV_ALI14XX
+-	if (probe_ali14xx)
+-		(void)ali14xx_init();
+-#endif
+-#ifdef CONFIG_BLK_DEV_UMC8672
+-	if (probe_umc8672)
+-		(void)umc8672_init();
+-#endif
+-#ifdef CONFIG_BLK_DEV_DTC2278
+-	if (probe_dtc2278)
+-		(void)dtc2278_init();
+-#endif
+-#ifdef CONFIG_BLK_DEV_HT6560B
+-	if (probe_ht6560b)
+-		(void)ht6560b_init();
+-#endif
+-#ifdef CONFIG_BLK_DEV_QD65XX
+-	if (probe_qd65xx)
+-		(void)qd65xx_init();
+-#endif
+-
+-	/* Probe for special PCI and other "known" interface chipsets. */
+-	probe_for_hwifs();
+-
+ 	return 0;
+ }
+ 
+@@ -1838,10 +1713,6 @@ void __exit cleanup_module (void)
+ 	for (index = 0; index < MAX_HWIFS; ++index)
+ 		ide_unregister(index);
+ 
+-#ifdef CONFIG_BLK_DEV_IDEPNP
+-	pnpide_exit();
+-#endif
+-
+ 	proc_ide_destroy();
+ 
+ 	bus_unregister(&ide_bus_type);
+diff --git a/drivers/ide/legacy/Makefile b/drivers/ide/legacy/Makefile
+index 4098223..7043ec7 100644
+--- a/drivers/ide/legacy/Makefile
++++ b/drivers/ide/legacy/Makefile
+@@ -1,15 +1,24 @@
+ 
++# link order is important here
++
+ obj-$(CONFIG_BLK_DEV_ALI14XX)		+= ali14xx.o
++obj-$(CONFIG_BLK_DEV_UMC8672)		+= umc8672.o
+ obj-$(CONFIG_BLK_DEV_DTC2278)		+= dtc2278.o
+ obj-$(CONFIG_BLK_DEV_HT6560B)		+= ht6560b.o
+ obj-$(CONFIG_BLK_DEV_QD65XX)		+= qd65xx.o
+-obj-$(CONFIG_BLK_DEV_UMC8672)		+= umc8672.o
+ 
+-obj-$(CONFIG_BLK_DEV_IDECS)		+= ide-cs.o
++obj-$(CONFIG_BLK_DEV_GAYLE)		+= gayle.o
++obj-$(CONFIG_BLK_DEV_FALCON_IDE)	+= falconide.o
++obj-$(CONFIG_BLK_DEV_MAC_IDE)		+= macide.o
++obj-$(CONFIG_BLK_DEV_Q40IDE)		+= q40ide.o
++obj-$(CONFIG_BLK_DEV_BUDDHA)		+= buddha.o
+ 
+-obj-$(CONFIG_BLK_DEV_PLATFORM)		+= ide_platform.o
++ifeq ($(CONFIG_BLK_DEV_IDECS), m)
++	obj-m += ide-cs.o
++endif
+ 
+-# Last of all
+-obj-$(CONFIG_BLK_DEV_HD)		+= hd.o
++ifeq ($(CONFIG_BLK_DEV_PLATFORM), m)
++	obj-m += ide_platform.o
++endif
+ 
+ EXTRA_CFLAGS	:= -Idrivers/ide
+diff --git a/drivers/ide/legacy/ali14xx.c b/drivers/ide/legacy/ali14xx.c
+index 38c3a6d..5ec0be4 100644
+--- a/drivers/ide/legacy/ali14xx.c
++++ b/drivers/ide/legacy/ali14xx.c
+@@ -231,8 +231,7 @@ int probe_ali14xx = 0;
+ module_param_named(probe, probe_ali14xx, bool, 0);
+ MODULE_PARM_DESC(probe, "probe for ALI M14xx chipsets");
+ 
+-/* Can be called directly from ide.c. */
+-int __init ali14xx_init(void)
++static int __init ali14xx_init(void)
+ {
+ 	if (probe_ali14xx == 0)
+ 		goto out;
+@@ -248,9 +247,7 @@ out:
+ 	return -ENODEV;
+ }
+ 
+-#ifdef MODULE
+ module_init(ali14xx_init);
+-#endif
+ 
+ MODULE_AUTHOR("see local file");
+ MODULE_DESCRIPTION("support of ALI 14XX IDE chipsets");
+diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/legacy/buddha.c
+index 4a0be25..74d28e0 100644
+--- a/drivers/ide/legacy/buddha.c
++++ b/drivers/ide/legacy/buddha.c
+@@ -112,6 +112,7 @@ typedef enum BuddhaType_Enum {
+     BOARD_BUDDHA, BOARD_CATWEASEL, BOARD_XSURF
+ } BuddhaType;
+ 
++static const char *buddha_board_name[] = { "Buddha", "Catweasel", "X-Surf" };
+ 
+     /*
+      *  Check and acknowledge the interrupt status
+@@ -143,11 +144,11 @@ static int xsurf_ack_intr(ide_hwif_t *hwif)
+      *  Probe for a Buddha or Catweasel IDE interface
+      */
+ 
+-void __init buddha_init(void)
++static int __init buddha_init(void)
+ {
+ 	hw_regs_t hw;
+ 	ide_hwif_t *hwif;
+-	int i, index;
++	int i;
+ 
+ 	struct zorro_dev *z = NULL;
+ 	u_long buddha_board = 0;
+@@ -156,6 +157,8 @@ void __init buddha_init(void)
+ 
+ 	while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
+ 		unsigned long board;
++		u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
++
+ 		if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) {
+ 			buddha_num_hwifs = BUDDHA_NUM_HWIFS;
+ 			type=BOARD_BUDDHA;
+@@ -195,7 +198,10 @@ fail_base2:
+ 		/* X-Surf doesn't have this.  IRQs are always on */
+ 		if (type != BOARD_XSURF)
+ 			z_writeb(0, buddha_board+BUDDHA_IRQ_MR);
+-		
++
++		printk(KERN_INFO "ide: %s IDE controller\n",
++				 buddha_board_name[type]);
++
+ 		for(i=0;i<buddha_num_hwifs;i++) {
+ 			if(type != BOARD_XSURF) {
+ 				ide_setup_ports(&hw, (buddha_board+buddha_bases[i]),
+@@ -213,23 +219,23 @@ fail_base2:
+ 						IRQ_AMIGA_PORTS);
+ 			}	
+ 
+-			index = ide_register_hw(&hw, NULL, 1, &hwif);
+-			if (index != -1) {
++			hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
++			if (hwif) {
++				u8 index = hwif->index;
++
++				ide_init_port_data(hwif, index);
++				ide_init_port_hw(hwif, &hw);
++
+ 				hwif->mmio = 1;
+-				printk("ide%d: ", index);
+-				switch(type) {
+-				case BOARD_BUDDHA:
+-					printk("Buddha");
+-					break;
+-				case BOARD_CATWEASEL:
+-					printk("Catweasel");
+-					break;
+-				case BOARD_XSURF:
+-					printk("X-Surf");
+-					break;
+-				}
+-				printk(" IDE interface\n");	    
+-			}		      
++
++				idx[i] = index;
++			}
+ 		}
++
++		ide_device_add(idx);
+ 	}
++
++	return 0;
+ }
++
++module_init(buddha_init);
+diff --git a/drivers/ide/legacy/dtc2278.c b/drivers/ide/legacy/dtc2278.c
+index 24a845d..13eee6d 100644
+--- a/drivers/ide/legacy/dtc2278.c
++++ b/drivers/ide/legacy/dtc2278.c
+@@ -150,8 +150,7 @@ int probe_dtc2278 = 0;
+ module_param_named(probe, probe_dtc2278, bool, 0);
+ MODULE_PARM_DESC(probe, "probe for DTC2278xx chipsets");
+ 
+-/* Can be called directly from ide.c. */
+-int __init dtc2278_init(void)
++static int __init dtc2278_init(void)
+ {
+ 	if (probe_dtc2278 == 0)
+ 		return -ENODEV;
+@@ -163,9 +162,7 @@ int __init dtc2278_init(void)
+ 	return 0;
+ }
+ 
+-#ifdef MODULE
+ module_init(dtc2278_init);
+-#endif
+ 
+ MODULE_AUTHOR("See Local File");
+ MODULE_DESCRIPTION("support of DTC-2278 VLB IDE chipsets");
+diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c
+index 7d7936f..2860956 100644
+--- a/drivers/ide/legacy/falconide.c
++++ b/drivers/ide/legacy/falconide.c
+@@ -62,19 +62,31 @@ EXPORT_SYMBOL(falconide_intr_lock);
+      *  Probe for a Falcon IDE interface
+      */
+ 
+-void __init falconide_init(void)
++static int __init falconide_init(void)
+ {
+     if (MACH_IS_ATARI && ATARIHW_PRESENT(IDE)) {
+ 	hw_regs_t hw;
+-	int index;
++
++	printk(KERN_INFO "ide: Falcon IDE controller\n");
+ 
+ 	ide_setup_ports(&hw, ATA_HD_BASE, falconide_offsets,
+ 			0, 0, NULL,
+ //			falconide_iops,
+ 			IRQ_MFP_IDE);
+-	index = ide_register_hw(&hw, NULL, 1, NULL);
+ 
+-	if (index != -1)
+-	    printk("ide%d: Falcon IDE interface\n", index);
++	hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
++	if (hwif) {
++		u8 index = hwif->index;
++		u8 idx[4] = { index, 0xff, 0xff, 0xff };
++
++		ide_init_port_data(hwif, index);
++		ide_init_port_hw(hwif, &hw);
++
++		ide_device_add(idx);
++	}
+     }
++
++    return 0;
+ }
++
++module_init(falconide_init);
+diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/legacy/gayle.c
+index 53331ee..492fa04 100644
+--- a/drivers/ide/legacy/gayle.c
++++ b/drivers/ide/legacy/gayle.c
+@@ -110,12 +110,13 @@ static int gayle_ack_intr_a1200(ide_hwif_t *hwif)
+      *  Probe for a Gayle IDE interface (and optionally for an IDE doubler)
+      */
+ 
+-void __init gayle_init(void)
++static int __init gayle_init(void)
+ {
+     int a4000, i;
++    u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ 
+     if (!MACH_IS_AMIGA)
+-	return;
++	return -ENODEV;
+ 
+     if ((a4000 = AMIGAHW_PRESENT(A4000_IDE)) || AMIGAHW_PRESENT(A1200_IDE))
+ 	goto found;
+@@ -125,15 +126,21 @@ void __init gayle_init(void)
+ 			  NULL))
+ 	goto found;
+ #endif
+-    return;
++    return -ENODEV;
+ 
+ found:
++	printk(KERN_INFO "ide: Gayle IDE controller (A%d style%s)\n",
++			 a4000 ? 4000 : 1200,
++#ifdef CONFIG_BLK_DEV_IDEDOUBLER
++			 ide_doubler ? ", IDE doubler" :
++#endif
++			 "");
++
+     for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++) {
+ 	unsigned long base, ctrlport, irqport;
+ 	ide_ack_intr_t *ack_intr;
+ 	hw_regs_t hw;
+ 	ide_hwif_t *hwif;
+-	int index;
+ 	unsigned long phys_base, res_start, res_n;
+ 
+ 	if (a4000) {
+@@ -165,21 +172,23 @@ found:
+ //			&gayle_iops,
+ 			IRQ_AMIGA_PORTS);
+ 
+-	index = ide_register_hw(&hw, NULL, 1, &hwif);
+-	if (index != -1) {
++	hwif = ide_find_port(base);
++	if (hwif) {
++	    u8 index = hwif->index;
++
++	    ide_init_port_data(hwif, index);
++	    ide_init_port_hw(hwif, &hw);
++
+ 	    hwif->mmio = 1;
+-	    switch (i) {
+-		case 0:
+-		    printk("ide%d: Gayle IDE interface (A%d style)\n", index,
+-			   a4000 ? 4000 : 1200);
+-		    break;
+-#ifdef CONFIG_BLK_DEV_IDEDOUBLER
+-		case 1:
+-		    printk("ide%d: IDE doubler\n", index);
+-		    break;
+-#endif /* CONFIG_BLK_DEV_IDEDOUBLER */
+-	    }
++
++	    idx[i] = index;
+ 	} else
+ 	    release_mem_region(res_start, res_n);
+     }
++
++    ide_device_add(idx);
++
++    return 0;
+ }
++
++module_init(gayle_init);
+diff --git a/drivers/ide/legacy/ht6560b.c b/drivers/ide/legacy/ht6560b.c
+index a4245d1..8da5031 100644
+--- a/drivers/ide/legacy/ht6560b.c
++++ b/drivers/ide/legacy/ht6560b.c
+@@ -307,8 +307,7 @@ int probe_ht6560b = 0;
+ module_param_named(probe, probe_ht6560b, bool, 0);
+ MODULE_PARM_DESC(probe, "probe for HT6560B chipset");
+ 
+-/* Can be called directly from ide.c. */
+-int __init ht6560b_init(void)
++static int __init ht6560b_init(void)
+ {
+ 	ide_hwif_t *hwif, *mate;
+ 	static u8 idx[4] = { 0, 1, 0xff, 0xff };
+@@ -369,9 +368,7 @@ release_region:
+ 	return -ENODEV;
+ }
+ 
+-#ifdef MODULE
+ module_init(ht6560b_init);
+-#endif
+ 
+ MODULE_AUTHOR("See Local File");
+ MODULE_DESCRIPTION("HT-6560B EIDE-controller support");
+diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
+index 03715c0..f4ea15b 100644
+--- a/drivers/ide/legacy/ide-cs.c
++++ b/drivers/ide/legacy/ide-cs.c
+@@ -153,7 +153,7 @@ static int idecs_register(unsigned long io, unsigned long ctl, unsigned long irq
+     hw.irq = irq;
+     hw.chipset = ide_pci;
+     hw.dev = &handle->dev;
+-    return ide_register_hw(&hw, &ide_undecoded_slave, 0, NULL);
++    return ide_register_hw(&hw, &ide_undecoded_slave, NULL);
+ }
+ 
+ /*======================================================================
+diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/legacy/ide_platform.c
+index 7bb79f5..69a0fb0 100644
+--- a/drivers/ide/legacy/ide_platform.c
++++ b/drivers/ide/legacy/ide_platform.c
+@@ -28,39 +28,27 @@ static struct {
+ 	int index;
+ } hwif_prop;
+ 
+-static ide_hwif_t *__devinit plat_ide_locate_hwif(void __iomem *base,
+-	    void __iomem *ctrl, struct pata_platform_info *pdata, int irq,
+-	    int mmio)
++static void __devinit plat_ide_setup_ports(hw_regs_t *hw,
++					   void __iomem *base,
++					   void __iomem *ctrl,
++					   struct pata_platform_info *pdata,
++					   int irq)
+ {
+ 	unsigned long port = (unsigned long)base;
+-	ide_hwif_t *hwif = ide_find_port(port);
+ 	int i;
+ 
+-	if (hwif == NULL)
+-		goto out;
+-
+-	hwif->io_ports[IDE_DATA_OFFSET] = port;
++	hw->io_ports[IDE_DATA_OFFSET] = port;
+ 
+ 	port += (1 << pdata->ioport_shift);
+ 	for (i = IDE_ERROR_OFFSET; i <= IDE_STATUS_OFFSET;
+ 	     i++, port += (1 << pdata->ioport_shift))
+-		hwif->io_ports[i] = port;
+-
+-	hwif->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl;
++		hw->io_ports[i] = port;
+ 
+-	hwif->irq = irq;
++	hw->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl;
+ 
+-	hwif->chipset = ide_generic;
++	hw->irq = irq;
+ 
+-	if (mmio) {
+-		hwif->mmio = 1;
+-		default_hwif_mmiops(hwif);
+-	}
+-
+-	hwif_prop.hwif = hwif;
+-	hwif_prop.index = hwif->index;
+-out:
+-	return hwif;
++	hw->chipset = ide_generic;
+ }
+ 
+ static int __devinit plat_ide_probe(struct platform_device *pdev)
+@@ -71,6 +59,7 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
+ 	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ 	int ret = 0;
+ 	int mmio = 0;
++	hw_regs_t hw;
+ 
+ 	pdata = pdev->dev.platform_data;
+ 
+@@ -106,15 +95,27 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
+ 			res_alt->start, res_alt->end - res_alt->start + 1);
+ 	}
+ 
+-	hwif = plat_ide_locate_hwif(hwif_prop.plat_ide_mapbase,
+-	         hwif_prop.plat_ide_alt_mapbase, pdata, res_irq->start, mmio);
+-
++	hwif = ide_find_port((unsigned long)hwif_prop.plat_ide_mapbase);
+ 	if (!hwif) {
+ 		ret = -ENODEV;
+ 		goto out;
+ 	}
+-	hwif->gendev.parent = &pdev->dev;
+-	hwif->noprobe = 0;
++
++	memset(&hw, 0, sizeof(hw));
++	plat_ide_setup_ports(&hw, hwif_prop.plat_ide_mapbase,
++			     hwif_prop.plat_ide_alt_mapbase,
++			     pdata, res_irq->start);
++	hw.dev = &pdev->dev;
++
++	ide_init_port_hw(hwif, &hw);
++
++	if (mmio) {
++		hwif->mmio = 1;
++		default_hwif_mmiops(hwif);
++	}
++
++	hwif_prop.hwif = hwif;
++	hwif_prop.index = hwif->index;
+ 
+ 	idx[0] = hwif->index;
+ 
+diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/legacy/macide.c
+index 5c6aa77..782d4c7 100644
+--- a/drivers/ide/legacy/macide.c
++++ b/drivers/ide/legacy/macide.c
+@@ -77,15 +77,17 @@ int macide_ack_intr(ide_hwif_t* hwif)
+ 	return 0;
+ }
+ 
++static const char *mac_ide_name[] =
++	{ "Quadra", "Powerbook", "Powerbook Baboon" };
++
+ /*
+  * Probe for a Macintosh IDE interface
+  */
+ 
+-void __init macide_init(void)
++static int __init macide_init(void)
+ {
+ 	hw_regs_t hw;
+ 	ide_hwif_t *hwif;
+-	int index = -1;
+ 
+ 	switch (macintosh_config->ide_type) {
+ 	case MAC_IDE_QUADRA:
+@@ -93,48 +95,50 @@ void __init macide_init(void)
+ 				0, 0, macide_ack_intr,
+ //				quadra_ide_iops,
+ 				IRQ_NUBUS_F);
+-		index = ide_register_hw(&hw, NULL, 1, &hwif);
+ 		break;
+ 	case MAC_IDE_PB:
+ 		ide_setup_ports(&hw, IDE_BASE, macide_offsets,
+ 				0, 0, macide_ack_intr,
+ //				macide_pb_iops,
+ 				IRQ_NUBUS_C);
+-		index = ide_register_hw(&hw, NULL, 1, &hwif);
+ 		break;
+ 	case MAC_IDE_BABOON:
+ 		ide_setup_ports(&hw, BABOON_BASE, macide_offsets,
+ 				0, 0, NULL,
+ //				macide_baboon_iops,
+ 				IRQ_BABOON_1);
+-		index = ide_register_hw(&hw, NULL, 1, &hwif);
+-		if (index == -1) break;
+-		if (macintosh_config->ident == MAC_MODEL_PB190) {
++		break;
++	default:
++		return -ENODEV;
++	}
++
++	printk(KERN_INFO "ide: Macintosh %s IDE controller\n",
++			 mac_ide_name[macintosh_config->ide_type - 1]);
+ 
++	hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
++	if (hwif) {
++		u8 index = hwif->index;
++		u8 idx[4] = { index, 0xff, 0xff, 0xff };
++
++		ide_init_port_data(hwif, index);
++		ide_init_port_hw(hwif, &hw);
++
++		if (macintosh_config->ide_type == MAC_IDE_BABOON &&
++		    macintosh_config->ident == MAC_MODEL_PB190) {
+ 			/* Fix breakage in ide-disk.c: drive capacity	*/
+ 			/* is not initialized for drives without a 	*/
+ 			/* hardware ID, and we can't get that without	*/
+ 			/* probing the drive which freezes a 190.	*/
+-
+-			ide_drive_t *drive = &ide_hwifs[index].drives[0];
++			ide_drive_t *drive = &hwif->drives[0];
+ 			drive->capacity64 = drive->cyl*drive->head*drive->sect;
+-
+ 		}
+-		break;
+-
+-	default:
+-	    return;
+-	}
+ 
+-        if (index != -1) {
+ 		hwif->mmio = 1;
+-		if (macintosh_config->ide_type == MAC_IDE_QUADRA)
+-			printk(KERN_INFO "ide%d: Macintosh Quadra IDE interface\n", index);
+-		else if (macintosh_config->ide_type == MAC_IDE_PB)
+-			printk(KERN_INFO "ide%d: Macintosh Powerbook IDE interface\n", index);
+-		else if (macintosh_config->ide_type == MAC_IDE_BABOON)
+-			printk(KERN_INFO "ide%d: Macintosh Powerbook Baboon IDE interface\n", index);
+-		else
+-			printk(KERN_INFO "ide%d: Unknown Macintosh IDE interface\n", index);
++
++		ide_device_add(idx);
+ 	}
++
++	return 0;
+ }
++
++module_init(macide_init);
+diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/legacy/q40ide.c
+index 6ea46a6..f532973 100644
+--- a/drivers/ide/legacy/q40ide.c
++++ b/drivers/ide/legacy/q40ide.c
+@@ -111,15 +111,17 @@ static const char *q40_ide_names[Q40IDE_NUM_HWIFS]={
+  *  Probe for Q40 IDE interfaces
+  */
+ 
+-void __init q40ide_init(void)
++static int __init q40ide_init(void)
+ {
+     int i;
+     ide_hwif_t *hwif;
+-    int index;
+     const char *name;
++    u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ 
+     if (!MACH_IS_Q40)
+-      return ;
++      return -ENODEV;
++
++    printk(KERN_INFO "ide: Q40 IDE controller\n");
+ 
+     for (i = 0; i < Q40IDE_NUM_HWIFS; i++) {
+ 	hw_regs_t hw;
+@@ -141,10 +143,20 @@ void __init q40ide_init(void)
+ 			0, NULL,
+ //			m68kide_iops,
+ 			q40ide_default_irq(pcide_bases[i]));
+-	index = ide_register_hw(&hw, NULL, 1, &hwif);
+-	// **FIXME**
+-	if (index != -1)
++
++	hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
++	if (hwif) {
++		ide_init_port_data(hwif, hwif->index);
++		ide_init_port_hw(hwif, &hw);
+ 		hwif->mmio = 1;
++
++		idx[i] = hwif->index;
++	}
+     }
++
++    ide_device_add(idx);
++
++    return 0;
+ }
+ 
++module_init(q40ide_init);
+diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/legacy/qd65xx.c
+index 912e738..2bac4c1 100644
+--- a/drivers/ide/legacy/qd65xx.c
++++ b/drivers/ide/legacy/qd65xx.c
+@@ -478,8 +478,7 @@ int probe_qd65xx = 0;
+ module_param_named(probe, probe_qd65xx, bool, 0);
+ MODULE_PARM_DESC(probe, "probe for QD65xx chipsets");
+ 
+-/* Can be called directly from ide.c. */
+-int __init qd65xx_init(void)
++static int __init qd65xx_init(void)
+ {
+ 	if (probe_qd65xx == 0)
+ 		return -ENODEV;
+@@ -492,9 +491,7 @@ int __init qd65xx_init(void)
+ 	return 0;
+ }
+ 
+-#ifdef MODULE
+ module_init(qd65xx_init);
+-#endif
+ 
+ MODULE_AUTHOR("Samuel Thibault");
+ MODULE_DESCRIPTION("support of qd65xx vlb ide chipset");
+diff --git a/drivers/ide/legacy/umc8672.c b/drivers/ide/legacy/umc8672.c
+index 79577b9..a1ae1ae 100644
+--- a/drivers/ide/legacy/umc8672.c
++++ b/drivers/ide/legacy/umc8672.c
+@@ -169,8 +169,7 @@ int probe_umc8672 = 0;
+ module_param_named(probe, probe_umc8672, bool, 0);
+ MODULE_PARM_DESC(probe, "probe for UMC8672 chipset");
+ 
+-/* Can be called directly from ide.c. */
+-int __init umc8672_init(void)
++static int __init umc8672_init(void)
+ {
+ 	if (probe_umc8672 == 0)
+ 		goto out;
+@@ -181,9 +180,7 @@ out:
+ 	return -ENODEV;;
+ }
+ 
+-#ifdef MODULE
+ module_init(umc8672_init);
+-#endif
+ 
+ MODULE_AUTHOR("Wolfram Podien");
+ MODULE_DESCRIPTION("Support for UMC 8672 IDE chipset");
 diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
-index a4ce3ba..a4d0d4c 100644
+index a4ce3ba..2d3e511 100644
 --- a/drivers/ide/mips/au1xxx-ide.c
 +++ b/drivers/ide/mips/au1xxx-ide.c
 @@ -198,8 +198,6 @@ static void auide_set_dma_mode(ide_drive_t *drive, const u8 speed)
@@ -56656,6 +61915,117 @@
  	}
  
  	au_writel(mem_sttime,MEM_STTIME2);
+@@ -397,26 +395,10 @@ static int auide_dma_test_irq(ide_drive_t *drive)
+ 	return 0;
+ }
+ 
+-static void auide_dma_host_on(ide_drive_t *drive)
+-{
+-}
+-
+-static int auide_dma_on(ide_drive_t *drive)
+-{
+-	drive->using_dma = 1;
+-
+-	return 0;
+-}
+-
+-static void auide_dma_host_off(ide_drive_t *drive)
++static void auide_dma_host_set(ide_drive_t *drive, int on)
+ {
+ }
+ 
+-static void auide_dma_off_quietly(ide_drive_t *drive)
+-{
+-	drive->using_dma = 0;
+-}
+-
+ static void auide_dma_lost_irq(ide_drive_t *drive)
+ {
+ 	printk(KERN_ERR "%s: IRQ lost\n", drive->name);
+@@ -643,12 +625,13 @@ static int au_ide_probe(struct device *dev)
+ 	/* FIXME:  This might possibly break PCMCIA IDE devices */
+ 
+ 	hwif                            = &ide_hwifs[pdev->id];
+-	hwif->irq			= ahwif->irq;
+-	hwif->chipset                   = ide_au1xxx;
+ 
+ 	memset(&hw, 0, sizeof(hw));
+ 	auide_setup_ports(&hw, ahwif);
+-	memcpy(hwif->io_ports, hw.io_ports, sizeof(hwif->io_ports));
++	hw.irq = ahwif->irq;
++	hw.chipset = ide_au1xxx;
++
++	ide_init_port_hw(hwif, &hw);
+ 
+ 	hwif->ultra_mask                = 0x0;  /* Disable Ultra DMA */
+ #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
+@@ -662,7 +645,6 @@ static int au_ide_probe(struct device *dev)
+ 	hwif->pio_mask = ATA_PIO4;
+ 	hwif->host_flags = IDE_HFLAG_POST_SET_MODE;
+ 
+-	hwif->noprobe = 0;
+ 	hwif->drives[0].unmask          = 1;
+ 	hwif->drives[1].unmask          = 1;
+ 
+@@ -684,29 +666,25 @@ static int au_ide_probe(struct device *dev)
+ 	hwif->set_dma_mode		= &auide_set_dma_mode;
+ 
+ #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
+-	hwif->dma_off_quietly		= &auide_dma_off_quietly;
+ 	hwif->dma_timeout		= &auide_dma_timeout;
+ 
+ 	hwif->mdma_filter		= &auide_mdma_filter;
+ 
++	hwif->dma_host_set		= &auide_dma_host_set;
+ 	hwif->dma_exec_cmd              = &auide_dma_exec_cmd;
+ 	hwif->dma_start                 = &auide_dma_start;
+ 	hwif->ide_dma_end               = &auide_dma_end;
+ 	hwif->dma_setup                 = &auide_dma_setup;
+ 	hwif->ide_dma_test_irq          = &auide_dma_test_irq;
+-	hwif->dma_host_off		= &auide_dma_host_off;
+-	hwif->dma_host_on		= &auide_dma_host_on;
+ 	hwif->dma_lost_irq		= &auide_dma_lost_irq;
+-	hwif->ide_dma_on                = &auide_dma_on;
+-#else /* !CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */
++#endif
+ 	hwif->channel                   = 0;
+-	hwif->hold                      = 1;
+ 	hwif->select_data               = 0;    /* no chipset-specific code */
+ 	hwif->config_data               = 0;    /* no chipset-specific code */
+ 
+ 	hwif->drives[0].autotune        = 1;    /* 1=autotune, 2=noautotune, 0=default */
+ 	hwif->drives[1].autotune	= 1;
+-#endif
++
+ 	hwif->drives[0].no_io_32bit	= 1;
+ 	hwif->drives[1].no_io_32bit	= 1;
+ 
+diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c
+index 521edd4..8b3959d 100644
+--- a/drivers/ide/mips/swarm.c
++++ b/drivers/ide/mips/swarm.c
+@@ -117,6 +117,7 @@ static int __devinit swarm_ide_probe(struct device *dev)
+ 	default_hwif_mmiops(hwif);
+ 	/* Prevent resource map manipulation.  */
+ 	hwif->mmio = 1;
++	hwif->chipset = ide_generic;
+ 	hwif->noprobe = 0;
+ 
+ 	for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++)
+diff --git a/drivers/ide/pci/Makefile b/drivers/ide/pci/Makefile
+index 95d1ea8..9480325 100644
+--- a/drivers/ide/pci/Makefile
++++ b/drivers/ide/pci/Makefile
+@@ -36,4 +36,8 @@ obj-$(CONFIG_BLK_DEV_VIA82CXXX)		+= via82cxxx.o
+ # Must appear at the end of the block
+ obj-$(CONFIG_BLK_DEV_GENERIC)          += generic.o
+ 
++ifeq ($(CONFIG_BLK_DEV_CMD640), m)
++	obj-m += cmd640.o
++endif
++
+ EXTRA_CFLAGS	:= -Idrivers/ide
 diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c
 index 4426850..7f4d185 100644
 --- a/drivers/ide/pci/aec62xx.c
@@ -56735,24 +62105,319 @@
  	 IDE_HFLAG_IO_32BIT | \
  	 IDE_HFLAG_UNMASK_IRQS | \
 diff --git a/drivers/ide/pci/atiixp.c b/drivers/ide/pci/atiixp.c
-index ef8e016..5ae2656 100644
+index ef8e016..4918719 100644
 --- a/drivers/ide/pci/atiixp.c
 +++ b/drivers/ide/pci/atiixp.c
-@@ -133,9 +133,6 @@ static void atiixp_set_dma_mode(ide_drive_t *drive, const u8 speed)
+@@ -1,5 +1,5 @@
+ /*
+- *  linux/drivers/ide/pci/atiixp.c	Version 0.03	Aug 3 2007
++ *  linux/drivers/ide/pci/atiixp.c	Version 0.05	Nov 9 2007
+  *
+  *  Copyright (C) 2003 ATI Inc. <hyu at ati.com>
+  *  Copyright (C) 2004,2007 Bartlomiej Zolnierkiewicz
+@@ -43,47 +43,8 @@ static atiixp_ide_timing mdma_timing[] = {
+ 	{ 0x02, 0x00 },
+ };
+ 
+-static int save_mdma_mode[4];
+-
+ static DEFINE_SPINLOCK(atiixp_lock);
+ 
+-static void atiixp_dma_host_on(ide_drive_t *drive)
+-{
+-	struct pci_dev *dev = drive->hwif->pci_dev;
+-	unsigned long flags;
+-	u16 tmp16;
+-
+-	spin_lock_irqsave(&atiixp_lock, flags);
+-
+-	pci_read_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
+-	if (save_mdma_mode[drive->dn])
+-		tmp16 &= ~(1 << drive->dn);
+-	else
+-		tmp16 |= (1 << drive->dn);
+-	pci_write_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
+-
+-	spin_unlock_irqrestore(&atiixp_lock, flags);
+-
+-	ide_dma_host_on(drive);
+-}
+-
+-static void atiixp_dma_host_off(ide_drive_t *drive)
+-{
+-	struct pci_dev *dev = drive->hwif->pci_dev;
+-	unsigned long flags;
+-	u16 tmp16;
+-
+-	spin_lock_irqsave(&atiixp_lock, flags);
+-
+-	pci_read_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
+-	tmp16 &= ~(1 << drive->dn);
+-	pci_write_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
+-
+-	spin_unlock_irqrestore(&atiixp_lock, flags);
+-
+-	ide_dma_host_off(drive);
+-}
+-
+ /**
+  *	atiixp_set_pio_mode	-	set host controller for PIO mode
+  *	@drive: drive
+@@ -132,29 +93,33 @@ static void atiixp_set_dma_mode(ide_drive_t *drive, const u8 speed)
+ 	int timing_shift = (drive->dn & 2) ? 16 : 0 + (drive->dn & 1) ? 0 : 8;
  	u32 tmp32;
  	u16 tmp16;
- 
+-
 -	if (speed < XFER_MW_DMA_0)
 -		return;
--
++	u16 udma_ctl = 0;
+ 
  	spin_lock_irqsave(&atiixp_lock, flags);
  
- 	save_mdma_mode[drive->dn] = 0;
+-	save_mdma_mode[drive->dn] = 0;
++	pci_read_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, &udma_ctl);
++
+ 	if (speed >= XFER_UDMA_0) {
+ 		pci_read_config_word(dev, ATIIXP_IDE_UDMA_MODE, &tmp16);
+ 		tmp16 &= ~(0x07 << (drive->dn * 4));
+ 		tmp16 |= ((speed & 0x07) << (drive->dn * 4));
+ 		pci_write_config_word(dev, ATIIXP_IDE_UDMA_MODE, tmp16);
+-	} else {
+-		if ((speed >= XFER_MW_DMA_0) && (speed <= XFER_MW_DMA_2)) {
+-			save_mdma_mode[drive->dn] = speed;
+-			pci_read_config_dword(dev, ATIIXP_IDE_MDMA_TIMING, &tmp32);
+-			tmp32 &= ~(0xff << timing_shift);
+-			tmp32 |= (mdma_timing[speed & 0x03].recover_width << timing_shift) |
+-				(mdma_timing[speed & 0x03].command_width << (timing_shift + 4));
+-			pci_write_config_dword(dev, ATIIXP_IDE_MDMA_TIMING, tmp32);
+-		}
++
++		udma_ctl |= (1 << drive->dn);
++	} else if (speed >= XFER_MW_DMA_0) {
++		u8 i = speed & 0x03;
++
++		pci_read_config_dword(dev, ATIIXP_IDE_MDMA_TIMING, &tmp32);
++		tmp32 &= ~(0xff << timing_shift);
++		tmp32 |= (mdma_timing[i].recover_width << timing_shift) |
++			 (mdma_timing[i].command_width << (timing_shift + 4));
++		pci_write_config_dword(dev, ATIIXP_IDE_MDMA_TIMING, tmp32);
++
++		udma_ctl &= ~(1 << drive->dn);
+ 	}
+ 
++	pci_write_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, udma_ctl);
++
+ 	spin_unlock_irqrestore(&atiixp_lock, flags);
+ }
+ 
+@@ -184,9 +149,6 @@ static void __devinit init_hwif_atiixp(ide_hwif_t *hwif)
+ 		hwif->cbl = ATA_CBL_PATA80;
+ 	else
+ 		hwif->cbl = ATA_CBL_PATA40;
+-
+-	hwif->dma_host_on = &atiixp_dma_host_on;
+-	hwif->dma_host_off = &atiixp_dma_host_off;
+ }
+ 
+ static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
+diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c
+index 4aa4810..da3565e 100644
+--- a/drivers/ide/pci/cmd640.c
++++ b/drivers/ide/pci/cmd640.c
+@@ -706,9 +706,9 @@ static int pci_conf2(void)
+ }
+ 
+ /*
+- * Probe for a cmd640 chipset, and initialize it if found.  Called from ide.c
++ * Probe for a cmd640 chipset, and initialize it if found.
+  */
+-int __init ide_probe_for_cmd640x (void)
++static int __init cmd640x_init(void)
+ {
+ #ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
+ 	int second_port_toggled = 0;
+@@ -717,6 +717,7 @@ int __init ide_probe_for_cmd640x (void)
+ 	const char *bus_type, *port2;
+ 	unsigned int index;
+ 	u8 b, cfr;
++	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+ 
+ 	if (cmd640_vlb && probe_for_cmd640_vlb()) {
+ 		bus_type = "VLB";
+@@ -769,6 +770,8 @@ int __init ide_probe_for_cmd640x (void)
+ 	cmd_hwif0->set_pio_mode = &cmd640_set_pio_mode;
+ #endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
+ 
++	idx[0] = cmd_hwif0->index;
++
+ 	/*
+ 	 * Ensure compatibility by always using the slowest timings
+ 	 * for access to the drive's command register block,
+@@ -826,6 +829,8 @@ int __init ide_probe_for_cmd640x (void)
+ 		cmd_hwif1->pio_mask = ATA_PIO5;
+ 		cmd_hwif1->set_pio_mode = &cmd640_set_pio_mode;
+ #endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
++
++		idx[1] = cmd_hwif1->index;
+ 	}
+ 	printk(KERN_INFO "%s: %sserialized, secondary interface %s\n", cmd_hwif1->name,
+ 		cmd_hwif0->serialized ? "" : "not ", port2);
+@@ -872,6 +877,13 @@ int __init ide_probe_for_cmd640x (void)
+ #ifdef CMD640_DUMP_REGS
+ 	cmd640_dump_regs();
+ #endif
++
++	ide_device_add(idx);
++
+ 	return 1;
+ }
+ 
++module_param_named(probe_vlb, cmd640_vlb, bool, 0);
++MODULE_PARM_DESC(probe_vlb, "probe for VLB version of CMD640 chipset");
++
++module_init(cmd640x_init);
 diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
-index bc55333..0b1e947 100644
+index bc55333..cd4eb9d 100644
 --- a/drivers/ide/pci/cmd64x.c
 +++ b/drivers/ide/pci/cmd64x.c
-@@ -322,8 +322,6 @@ static void cmd64x_set_dma_mode(ide_drive_t *drive, const u8 speed)
+@@ -1,5 +1,5 @@
+ /*
+- * linux/drivers/ide/pci/cmd64x.c		Version 1.52	Dec 24, 2007
++ * linux/drivers/ide/pci/cmd64x.c		Version 1.53	Dec 24, 2007
+  *
+  * cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines.
+  *           Due to massive hardware bugs, UltraDMA is only supported
+@@ -22,8 +22,6 @@
+ 
+ #include <asm/io.h>
+ 
+-#define DISPLAY_CMD64X_TIMINGS
+-
+ #define CMD_DEBUG 0
+ 
+ #if CMD_DEBUG
+@@ -37,11 +35,6 @@
+  */
+ #define CFR		0x50
+ #define   CFR_INTR_CH0		0x04
+-#define CNTRL		0x51
+-#define   CNTRL_ENA_1ST 	0x04
+-#define   CNTRL_ENA_2ND 	0x08
+-#define   CNTRL_DIS_RA0 	0x40
+-#define   CNTRL_DIS_RA1 	0x80
+ 
+ #define	CMDTIM		0x52
+ #define	ARTTIM0		0x53
+@@ -60,108 +53,13 @@
+ #define MRDMODE		0x71
+ #define   MRDMODE_INTR_CH0	0x04
+ #define   MRDMODE_INTR_CH1	0x08
+-#define   MRDMODE_BLK_CH0	0x10
+-#define   MRDMODE_BLK_CH1	0x20
+-#define BMIDESR0	0x72
+ #define UDIDETCR0	0x73
+ #define DTPR0		0x74
+ #define BMIDECR1	0x78
+ #define BMIDECSR	0x79
+-#define BMIDESR1	0x7A
+ #define UDIDETCR1	0x7B
+ #define DTPR1		0x7C
+ 
+-#if defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
+-#include <linux/stat.h>
+-#include <linux/proc_fs.h>
+-
+-static u8 cmd64x_proc = 0;
+-
+-#define CMD_MAX_DEVS		5
+-
+-static struct pci_dev *cmd_devs[CMD_MAX_DEVS];
+-static int n_cmd_devs;
+-
+-static char * print_cmd64x_get_info (char *buf, struct pci_dev *dev, int index)
+-{
+-	char *p = buf;
+-	u8 reg72 = 0, reg73 = 0;			/* primary */
+-	u8 reg7a = 0, reg7b = 0;			/* secondary */
+-	u8 reg50 = 1, reg51 = 1, reg57 = 0, reg71 = 0;	/* extra */
+-
+-	p += sprintf(p, "\nController: %d\n", index);
+-	p += sprintf(p, "PCI-%x Chipset.\n", dev->device);
+-
+-	(void) pci_read_config_byte(dev, CFR,       &reg50);
+-	(void) pci_read_config_byte(dev, CNTRL,     &reg51);
+-	(void) pci_read_config_byte(dev, ARTTIM23,  &reg57);
+-	(void) pci_read_config_byte(dev, MRDMODE,   &reg71);
+-	(void) pci_read_config_byte(dev, BMIDESR0,  &reg72);
+-	(void) pci_read_config_byte(dev, UDIDETCR0, &reg73);
+-	(void) pci_read_config_byte(dev, BMIDESR1,  &reg7a);
+-	(void) pci_read_config_byte(dev, UDIDETCR1, &reg7b);
+-
+-	/* PCI0643/6 originally didn't have the primary channel enable bit */
+-	if ((dev->device == PCI_DEVICE_ID_CMD_643) ||
+-	    (dev->device == PCI_DEVICE_ID_CMD_646 && dev->revision < 3))
+-		reg51 |= CNTRL_ENA_1ST;
+-
+-	p += sprintf(p, "---------------- Primary Channel "
+-			"---------------- Secondary Channel ------------\n");
+-	p += sprintf(p, "                 %s                         %s\n",
+-		 (reg51 & CNTRL_ENA_1ST) ? "enabled " : "disabled",
+-		 (reg51 & CNTRL_ENA_2ND) ? "enabled " : "disabled");
+-	p += sprintf(p, "---------------- drive0 --------- drive1 "
+-			"-------- drive0 --------- drive1 ------\n");
+-	p += sprintf(p, "DMA enabled:     %s              %s"
+-			"             %s              %s\n",
+-		(reg72 & 0x20) ? "yes" : "no ", (reg72 & 0x40) ? "yes" : "no ",
+-		(reg7a & 0x20) ? "yes" : "no ", (reg7a & 0x40) ? "yes" : "no ");
+-	p += sprintf(p, "UltraDMA mode:   %s (%c)          %s (%c)",
+-		( reg73 & 0x01) ? " on" : "off",
+-		((reg73 & 0x30) == 0x30) ? ((reg73 & 0x04) ? '3' : '0') :
+-		((reg73 & 0x30) == 0x20) ? ((reg73 & 0x04) ? '3' : '1') :
+-		((reg73 & 0x30) == 0x10) ? ((reg73 & 0x04) ? '4' : '2') :
+-		((reg73 & 0x30) == 0x00) ? ((reg73 & 0x04) ? '5' : '2') : '?',
+-		( reg73 & 0x02) ? " on" : "off",
+-		((reg73 & 0xC0) == 0xC0) ? ((reg73 & 0x08) ? '3' : '0') :
+-		((reg73 & 0xC0) == 0x80) ? ((reg73 & 0x08) ? '3' : '1') :
+-		((reg73 & 0xC0) == 0x40) ? ((reg73 & 0x08) ? '4' : '2') :
+-		((reg73 & 0xC0) == 0x00) ? ((reg73 & 0x08) ? '5' : '2') : '?');
+-	p += sprintf(p, "         %s (%c)          %s (%c)\n",
+-		( reg7b & 0x01) ? " on" : "off",
+-		((reg7b & 0x30) == 0x30) ? ((reg7b & 0x04) ? '3' : '0') :
+-		((reg7b & 0x30) == 0x20) ? ((reg7b & 0x04) ? '3' : '1') :
+-		((reg7b & 0x30) == 0x10) ? ((reg7b & 0x04) ? '4' : '2') :
+-		((reg7b & 0x30) == 0x00) ? ((reg7b & 0x04) ? '5' : '2') : '?',
+-		( reg7b & 0x02) ? " on" : "off",
+-		((reg7b & 0xC0) == 0xC0) ? ((reg7b & 0x08) ? '3' : '0') :
+-		((reg7b & 0xC0) == 0x80) ? ((reg7b & 0x08) ? '3' : '1') :
+-		((reg7b & 0xC0) == 0x40) ? ((reg7b & 0x08) ? '4' : '2') :
+-		((reg7b & 0xC0) == 0x00) ? ((reg7b & 0x08) ? '5' : '2') : '?');
+-	p += sprintf(p, "Interrupt:       %s, %s                 %s, %s\n",
+-		(reg71 & MRDMODE_BLK_CH0  ) ? "blocked" : "enabled",
+-		(reg50 & CFR_INTR_CH0	  ) ? "pending" : "clear  ",
+-		(reg71 & MRDMODE_BLK_CH1  ) ? "blocked" : "enabled",
+-		(reg57 & ARTTIM23_INTR_CH1) ? "pending" : "clear  ");
+-
+-	return (char *)p;
+-}
+-
+-static int cmd64x_get_info (char *buffer, char **addr, off_t offset, int count)
+-{
+-	char *p = buffer;
+-	int i;
+-
+-	for (i = 0; i < n_cmd_devs; i++) {
+-		struct pci_dev *dev	= cmd_devs[i];
+-		p = print_cmd64x_get_info(p, dev, i);
+-	}
+-	return p-buffer;	/* => must be less than 4k! */
+-}
+-
+-#endif	/* defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */
+-
+ static u8 quantize_timing(int timing, int quant)
+ {
+ 	return (timing + quant - 1) / quant;
+@@ -322,8 +220,6 @@ static void cmd64x_set_dma_mode(ide_drive_t *drive, const u8 speed)
  	case XFER_MW_DMA_0:
  		program_cycle_times(drive, 480, 215);
  		break;
@@ -56761,7 +62426,7 @@
  	}
  
  	if (speed >= XFER_SW_DMA_0)
-@@ -333,14 +331,15 @@ static void cmd64x_set_dma_mode(ide_drive_t *drive, const u8 speed)
+@@ -333,14 +229,15 @@ static void cmd64x_set_dma_mode(ide_drive_t *drive, const u8 speed)
  static int cmd648_ide_dma_end (ide_drive_t *drive)
  {
  	ide_hwif_t *hwif	= HWIF(drive);
@@ -56779,7 +62444,7 @@
  
  	return err;
  }
-@@ -365,10 +364,11 @@ static int cmd64x_ide_dma_end (ide_drive_t *drive)
+@@ -365,10 +262,11 @@ static int cmd64x_ide_dma_end (ide_drive_t *drive)
  static int cmd648_ide_dma_test_irq (ide_drive_t *drive)
  {
  	ide_hwif_t *hwif	= HWIF(drive);
@@ -56792,11 +62457,77 @@
  
  #ifdef DEBUG
  	printk("%s: dma_stat: 0x%02x mrdmode: 0x%02x irq_mask: 0x%02x\n",
+@@ -472,16 +370,6 @@ static unsigned int __devinit init_chipset_cmd64x(struct pci_dev *dev, const cha
+ 	mrdmode &= ~0x30;
+ 	(void) pci_write_config_byte(dev, MRDMODE, (mrdmode | 0x02));
+ 
+-#if defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
+-
+-	cmd_devs[n_cmd_devs++] = dev;
+-
+-	if (!cmd64x_proc) {
+-		cmd64x_proc = 1;
+-		ide_pci_create_host_proc("cmd64x", cmd64x_get_info);
+-	}
+-#endif /* DISPLAY_CMD64X_TIMINGS && CONFIG_IDE_PROC_FS */
+-
+ 	return 0;
+ }
+ 
 diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/pci/cs5520.c
-index 0466462..d1a91bc 100644
+index 0466462..6ec00b8 100644
 --- a/drivers/ide/pci/cs5520.c
 +++ b/drivers/ide/pci/cs5520.c
-@@ -137,6 +137,7 @@ static void __devinit init_hwif_cs5520(ide_hwif_t *hwif)
+@@ -71,7 +71,6 @@ static void cs5520_set_pio_mode(ide_drive_t *drive, const u8 pio)
+ 	ide_hwif_t *hwif = HWIF(drive);
+ 	struct pci_dev *pdev = hwif->pci_dev;
+ 	int controller = drive->dn > 1 ? 1 : 0;
+-	u8 reg;
+ 
+ 	/* FIXME: if DMA = 1 do we need to set the DMA bit here ? */
+ 
+@@ -91,11 +90,6 @@ static void cs5520_set_pio_mode(ide_drive_t *drive, const u8 pio)
+ 	pci_write_config_byte(pdev, 0x66 + 4*controller + (drive->dn&1),
+ 		(cs5520_pio_clocks[pio].recovery << 4) |
+ 		(cs5520_pio_clocks[pio].assert));
+-		
+-	/* Set the DMA enable/disable flag */
+-	reg = inb(hwif->dma_base + 0x02 + 8*controller);
+-	reg |= 1<<((drive->dn&1)+5);
+-	outb(reg, hwif->dma_base + 0x02 + 8*controller);
+ }
+ 
+ static void cs5520_set_dma_mode(ide_drive_t *drive, const u8 speed)
+@@ -109,13 +103,14 @@ static void cs5520_set_dma_mode(ide_drive_t *drive, const u8 speed)
+  *	We wrap the DMA activate to set the vdma flag. This is needed
+  *	so that the IDE DMA layer issues PIO not DMA commands over the
+  *	DMA channel
++ *
++ *	ATAPI is harder so disable it for now using IDE_HFLAG_NO_ATAPI_DMA
+  */
+- 
+-static int cs5520_dma_on(ide_drive_t *drive)
++
++static void cs5520_dma_host_set(ide_drive_t *drive, int on)
+ {
+-	/* ATAPI is harder so leave it for now */
+-	drive->vdma = 1;
+-	return 0;
++	drive->vdma = on;
++	ide_dma_host_set(drive, on);
+ }
+ 
+ static void __devinit init_hwif_cs5520(ide_hwif_t *hwif)
+@@ -126,7 +121,7 @@ static void __devinit init_hwif_cs5520(ide_hwif_t *hwif)
+ 	if (hwif->dma_base == 0)
+ 		return;
+ 
+-	hwif->ide_dma_on = &cs5520_dma_on;
++	hwif->dma_host_set = &cs5520_dma_host_set;
+ }
+ 
+ #define DECLARE_CS_DEV(name_str)				\
+@@ -137,6 +132,7 @@ static void __devinit init_hwif_cs5520(ide_hwif_t *hwif)
  				  IDE_HFLAG_CS5520 |		\
  				  IDE_HFLAG_VDMA |		\
  				  IDE_HFLAG_NO_ATAPI_DMA |	\
@@ -56830,6 +62561,138 @@
  	.pio_mask	= ATA_PIO4,
  	.mwdma_mask	= ATA_MWDMA2,
  	.udma_mask	= ATA_UDMA4,
+diff --git a/drivers/ide/pci/cy82c693.c b/drivers/ide/pci/cy82c693.c
+index 1cd4e9c..3ec4c65 100644
+--- a/drivers/ide/pci/cy82c693.c
++++ b/drivers/ide/pci/cy82c693.c
+@@ -1,5 +1,5 @@
+ /*
+- * linux/drivers/ide/pci/cy82c693.c		Version 0.42	Oct 23, 2007
++ * linux/drivers/ide/pci/cy82c693.c		Version 0.44	Nov 8, 2007
+  *
+  *  Copyright (C) 1998-2000 Andreas S. Krebs (akrebs at altavista.net), Maintainer
+  *  Copyright (C) 1998-2002 Andre Hedrick <andre at linux-ide.org>, Integrator
+@@ -176,17 +176,12 @@ static void compute_clocks (u8 pio, pio_clocks_t *p_pclk)
+  * set DMA mode a specific channel for CY82C693
+  */
+ 
+-static void cy82c693_dma_enable (ide_drive_t *drive, int mode, int single)
++static void cy82c693_set_dma_mode(ide_drive_t *drive, const u8 mode)
+ {
+-	u8 index = 0, data = 0;
++	ide_hwif_t *hwif = drive->hwif;
++	u8 single = (mode & 0x10) >> 4, index = 0, data = 0;
+ 
+-	if (mode>2)	/* make sure we set a valid mode */
+-		mode = 2;
+-			   
+-	if (mode > drive->id->tDMA)  /* to be absolutly sure we have a valid mode */
+-		mode = drive->id->tDMA;
+-	
+-	index = (HWIF(drive)->channel==0) ? CY82_INDEX_CHANNEL0 : CY82_INDEX_CHANNEL1;
++	index = hwif->channel ? CY82_INDEX_CHANNEL1 : CY82_INDEX_CHANNEL0;
+ 
+ #if CY82C693_DEBUG_LOGS
+ 	/* for debug let's show the previous values */
+@@ -199,7 +194,7 @@ static void cy82c693_dma_enable (ide_drive_t *drive, int mode, int single)
+ 		(data&0x3), ((data>>2)&1));
+ #endif /* CY82C693_DEBUG_LOGS */
+ 
+-	data = (u8)mode|(u8)(single<<2);
++	data = (mode & 3) | (single << 2);
+ 
+ 	outb(index, CY82_INDEX_PORT);
+ 	outb(data, CY82_DATA_PORT);
+@@ -207,7 +202,7 @@ static void cy82c693_dma_enable (ide_drive_t *drive, int mode, int single)
+ #if CY82C693_DEBUG_INFO
+ 	printk(KERN_INFO "%s (ch=%d, dev=%d): set DMA mode to %d (single=%d)\n",
+ 		drive->name, HWIF(drive)->channel, drive->select.b.unit,
+-		mode, single);
++		mode & 3, single);
+ #endif /* CY82C693_DEBUG_INFO */
+ 
+ 	/* 
+@@ -230,39 +225,6 @@ static void cy82c693_dma_enable (ide_drive_t *drive, int mode, int single)
+ #endif /* CY82C693_DEBUG_INFO */
+ }
+ 
+-/* 
+- * used to set DMA mode for CY82C693 (single and multi modes)
+- */
+-static int cy82c693_ide_dma_on (ide_drive_t *drive)
+-{
+-	struct hd_driveid *id = drive->id;
+-
+-#if CY82C693_DEBUG_INFO
+-	printk (KERN_INFO "dma_on: %s\n", drive->name);
+-#endif /* CY82C693_DEBUG_INFO */
+-
+-	if (id != NULL) {		
+-		/* Enable DMA on any drive that has DMA
+-		 * (multi or single) enabled
+-		 */
+-		if (id->field_valid & 2) {	/* regular DMA */
+-			int mmode, smode;
+-
+-			mmode = id->dma_mword & (id->dma_mword >> 8);
+-			smode = id->dma_1word & (id->dma_1word >> 8);
+-			       		      
+-			if (mmode != 0) {
+-				/* enable multi */
+-				cy82c693_dma_enable(drive, (mmode >> 1), 0);
+-			} else if (smode != 0) {
+-				/* enable single */
+-				cy82c693_dma_enable(drive, (smode >> 1), 1);
+-			}
+-		}
+-	}
+-        return __ide_dma_on(drive);
+-}
+-
+ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
+ {
+ 	ide_hwif_t *hwif = HWIF(drive);
+@@ -429,11 +391,7 @@ static unsigned int __devinit init_chipset_cy82c693(struct pci_dev *dev, const c
+ static void __devinit init_hwif_cy82c693(ide_hwif_t *hwif)
+ {
+ 	hwif->set_pio_mode = &cy82c693_set_pio_mode;
+-
+-	if (hwif->dma_base == 0)
+-		return;
+-
+-	hwif->ide_dma_on = &cy82c693_ide_dma_on;
++	hwif->set_dma_mode = &cy82c693_set_dma_mode;
+ }
+ 
+ static void __devinit init_iops_cy82c693(ide_hwif_t *hwif)
+@@ -454,11 +412,11 @@ static const struct ide_port_info cy82c693_chipset __devinitdata = {
+ 	.init_iops	= init_iops_cy82c693,
+ 	.init_hwif	= init_hwif_cy82c693,
+ 	.chipset	= ide_cy82c693,
+-	.host_flags	= IDE_HFLAG_SINGLE | IDE_HFLAG_TRUST_BIOS_FOR_DMA |
++	.host_flags	= IDE_HFLAG_SINGLE | IDE_HFLAG_CY82C693 |
+ 			  IDE_HFLAG_BOOTABLE,
+ 	.pio_mask	= ATA_PIO4,
+-	.swdma_mask	= ATA_SWDMA2_ONLY,
+-	.mwdma_mask	= ATA_MWDMA2_ONLY,
++	.swdma_mask	= ATA_SWDMA2,
++	.mwdma_mask	= ATA_MWDMA2,
+ };
+ 
+ static int __devinit cy82c693_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/pci/delkin_cb.c
+index 8382908..26aa492 100644
+--- a/drivers/ide/pci/delkin_cb.c
++++ b/drivers/ide/pci/delkin_cb.c
+@@ -80,7 +80,7 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
+ 	hw.irq = dev->irq;
+ 	hw.chipset = ide_pci;		/* this enables IRQ sharing */
+ 
+-	rc = ide_register_hw(&hw, &ide_undecoded_slave, 0, &hwif);
++	rc = ide_register_hw(&hw, &ide_undecoded_slave, &hwif);
+ 	if (rc < 0) {
+ 		printk(KERN_ERR "delkin_cb: ide_register_hw failed (%d)\n", rc);
+ 		pci_disable_device(dev);
 diff --git a/drivers/ide/pci/hpt34x.c b/drivers/ide/pci/hpt34x.c
 index ae6307f..dfba0d1 100644
 --- a/drivers/ide/pci/hpt34x.c
@@ -56867,7 +62730,7 @@
  #ifdef CONFIG_HPT34X_AUTODMA
  		.swdma_mask	= ATA_SWDMA2,
 diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
-index 9fce25b..3777fb8 100644
+index 9fce25b..1268593 100644
 --- a/drivers/ide/pci/hpt366.c
 +++ b/drivers/ide/pci/hpt366.c
 @@ -1,5 +1,5 @@
@@ -57052,7 +62915,7 @@
  };
  
  static int check_in_drive_list(ide_drive_t *drive, const char **list)
-@@ -675,71 +693,33 @@ static u32 get_speed_setting(u8 speed, struct hpt_info *info)
+@@ -675,94 +693,50 @@ static u32 get_speed_setting(u8 speed, struct hpt_info *info)
  	for (i = 0; i < ARRAY_SIZE(xfer_speeds) - 1; i++)
  		if (xfer_speeds[i] == speed)
  			break;
@@ -57136,23 +62999,36 @@
  static void hpt3xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
  {
  	hpt3xx_set_mode(drive, XFER_PIO_0 + pio);
-@@ -756,15 +736,6 @@ static int hpt3xx_quirkproc(ide_drive_t *drive)
- 	return 0;
  }
  
+-static int hpt3xx_quirkproc(ide_drive_t *drive)
++static void hpt3xx_quirkproc(ide_drive_t *drive)
+ {
+ 	struct hd_driveid *id	= drive->id;
+ 	const  char **list	= quirk_drives;
+ 
+ 	while (*list)
+-		if (strstr(id->model, *list++))
+-			return 1;
+-	return 0;
+-}
+-
 -static void hpt3xx_intrproc(ide_drive_t *drive)
 -{
 -	if (drive->quirk_list)
 -		return;
--
++		if (strstr(id->model, *list++)) {
++			drive->quirk_list = 1;
++			return;
++		}
+ 
 -	/* drives in the quirk_list may not like intr setups/cleanups */
 -	outb(drive->ctl | 2, IDE_CONTROL_REG);
--}
--
++	drive->quirk_list = 0;
+ }
+ 
  static void hpt3xx_maskproc(ide_drive_t *drive, int mask)
- {
- 	ide_hwif_t *hwif	= HWIF(drive);
-@@ -914,32 +885,33 @@ static int hpt374_ide_dma_end(ide_drive_t *drive)
+@@ -914,32 +888,33 @@ static int hpt374_ide_dma_end(ide_drive_t *drive)
  
  static void hpt3xxn_set_clock(ide_hwif_t *hwif, u8 mode)
  {
@@ -57196,7 +63072,7 @@
  }
  
  /**
-@@ -1210,7 +1182,7 @@ static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const cha
+@@ -1210,7 +1185,7 @@ static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const cha
  	 * We also  don't like using  the DPLL because this causes glitches
  	 * on PRST-/SRST- when the state engine gets reset...
  	 */
@@ -57205,7 +63081,7 @@
  		u16 f_low, delta = pci_clk < 50 ? 2 : 4;
  		int adjust;
  
-@@ -1226,7 +1198,7 @@ static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const cha
+@@ -1226,7 +1201,7 @@ static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const cha
  			clock = ATA_CLOCK_50MHZ;
  		}
  
@@ -57214,7 +63090,7 @@
  			printk(KERN_ERR "%s: unknown bus timing!\n", name);
  			kfree(info);
  			return -EIO;
-@@ -1267,15 +1239,10 @@ static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const cha
+@@ -1267,15 +1242,10 @@ static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const cha
  		printk("%s: using %d MHz PCI clock\n", name, pci_clk);
  	}
  
@@ -57231,7 +63107,7 @@
  
  	/* Point to this chip's own instance of the hpt_info structure. */
  	pci_set_drvdata(dev, info);
-@@ -1320,8 +1287,8 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
+@@ -1320,8 +1290,8 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
  
  	hwif->set_pio_mode	= &hpt3xx_set_pio_mode;
  	hwif->set_dma_mode	= &hpt3xx_set_mode;
@@ -57241,7 +63117,7 @@
  	hwif->maskproc		= &hpt3xx_maskproc;
  	hwif->busproc		= &hpt3xx_busproc;
  
-@@ -1494,6 +1461,11 @@ static int __devinit hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2)
+@@ -1494,6 +1464,11 @@ static int __devinit hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2)
  	return 0;
  }
  
@@ -57253,7 +63129,7 @@
  static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
  	{	/* 0 */
  		.name		= "HPT36x",
-@@ -1508,9 +1480,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
+@@ -1508,9 +1483,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
  		 */
  		.enablebits	= {{0x50,0x10,0x10}, {0x54,0x04,0x04}},
  		.extra		= 240,
@@ -57264,7 +63140,7 @@
  		.pio_mask	= ATA_PIO4,
  		.mwdma_mask	= ATA_MWDMA2,
  	},{	/* 1 */
-@@ -1520,7 +1490,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
+@@ -1520,7 +1493,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
  		.init_dma	= init_dma_hpt366,
  		.enablebits	= {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
  		.extra		= 240,
@@ -57273,7 +63149,7 @@
  		.pio_mask	= ATA_PIO4,
  		.mwdma_mask	= ATA_MWDMA2,
  	},{	/* 2 */
-@@ -1530,7 +1500,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
+@@ -1530,7 +1503,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
  		.init_dma	= init_dma_hpt366,
  		.enablebits	= {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
  		.extra		= 240,
@@ -57282,7 +63158,7 @@
  		.pio_mask	= ATA_PIO4,
  		.mwdma_mask	= ATA_MWDMA2,
  	},{	/* 3 */
-@@ -1540,7 +1510,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
+@@ -1540,7 +1513,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
  		.init_dma	= init_dma_hpt366,
  		.enablebits	= {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
  		.extra		= 240,
@@ -57291,7 +63167,7 @@
  		.pio_mask	= ATA_PIO4,
  		.mwdma_mask	= ATA_MWDMA2,
  	},{	/* 4 */
-@@ -1551,7 +1521,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
+@@ -1551,7 +1524,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
  		.enablebits	= {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
  		.udma_mask	= ATA_UDMA5,
  		.extra		= 240,
@@ -57300,7 +63176,7 @@
  		.pio_mask	= ATA_PIO4,
  		.mwdma_mask	= ATA_MWDMA2,
  	},{	/* 5 */
-@@ -1561,7 +1531,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
+@@ -1561,7 +1534,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
  		.init_dma	= init_dma_hpt366,
  		.enablebits	= {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
  		.extra		= 240,
@@ -57342,8 +63218,90 @@
  		if (!(reg48 & u_flag))
  			pci_write_config_byte(dev, 0x48, reg48 | u_flag);
  		if (speed >= XFER_UDMA_5) {
+diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
+index 99b7d76..e610a53 100644
+--- a/drivers/ide/pci/it821x.c
++++ b/drivers/ide/pci/it821x.c
+@@ -431,33 +431,29 @@ static u8 __devinit ata66_it821x(ide_hwif_t *hwif)
+ }
+ 
+ /**
+- *	it821x_fixup	-	post init callback
+- *	@hwif: interface
++ *	it821x_quirkproc	-	post init callback
++ *	@drive: drive
+  *
+- *	This callback is run after the drives have been probed but
++ *	This callback is run after the drive has been probed but
+  *	before anything gets attached. It allows drivers to do any
+  *	final tuning that is needed, or fixups to work around bugs.
+  */
+ 
+-static void __devinit it821x_fixups(ide_hwif_t *hwif)
++static void __devinit it821x_quirkproc(ide_drive_t *drive)
+ {
+-	struct it821x_dev *itdev = ide_get_hwifdata(hwif);
+-	int i;
++	struct it821x_dev *itdev = ide_get_hwifdata(drive->hwif);
++	struct hd_driveid *id = drive->id;
++	u16 *idbits = (u16 *)drive->id;
+ 
+-	if(!itdev->smart) {
++	if (!itdev->smart) {
+ 		/*
+ 		 *	If we are in pass through mode then not much
+ 		 *	needs to be done, but we do bother to clear the
+ 		 *	IRQ mask as we may well be in PIO (eg rev 0x10)
+ 		 *	for now and we know unmasking is safe on this chipset.
+ 		 */
+-		for (i = 0; i < 2; i++) {
+-			ide_drive_t *drive = &hwif->drives[i];
+-			if(drive->present)
+-				drive->unmask = 1;
+-		}
+-		return;
+-	}
++		drive->unmask = 1;
++	} else {
+ 	/*
+ 	 *	Perform fixups on smart mode. We need to "lose" some
+ 	 *	capabilities the firmware lacks but does not filter, and
+@@ -465,16 +461,6 @@ static void __devinit it821x_fixups(ide_hwif_t *hwif)
+ 	 *	in RAID mode.
+ 	 */
+ 
+-	for(i = 0; i < 2; i++) {
+-		ide_drive_t *drive = &hwif->drives[i];
+-		struct hd_driveid *id;
+-		u16 *idbits;
+-
+-		if(!drive->present)
+-			continue;
+-		id = drive->id;
+-		idbits = (u16 *)drive->id;
+-
+ 		/* Check for RAID v native */
+ 		if(strstr(id->model, "Integrated Technology Express")) {
+ 			/* In raid mode the ident block is slightly buggy
+@@ -537,6 +523,8 @@ static void __devinit init_hwif_it821x(ide_hwif_t *hwif)
+ 	struct it821x_dev *idev = kzalloc(sizeof(struct it821x_dev), GFP_KERNEL);
+ 	u8 conf;
+ 
++	hwif->quirkproc = &it821x_quirkproc;
++
+ 	if (idev == NULL) {
+ 		printk(KERN_ERR "it821x: out of memory, falling back to legacy behaviour.\n");
+ 		return;
+@@ -633,7 +621,6 @@ static unsigned int __devinit init_chipset_it821x(struct pci_dev *dev, const cha
+ 		.name		= name_str,		\
+ 		.init_chipset	= init_chipset_it821x,	\
+ 		.init_hwif	= init_hwif_it821x,	\
+-		.fixup	 	= it821x_fixups,	\
+ 		.host_flags	= IDE_HFLAG_BOOTABLE,	\
+ 		.pio_mask	= ATA_PIO4,		\
+ 	}
 diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c
-index 2b4f44e..ef4a99b 100644
+index 2b4f44e..89d2363 100644
 --- a/drivers/ide/pci/pdc202xx_new.c
 +++ b/drivers/ide/pci/pdc202xx_new.c
 @@ -146,7 +146,7 @@ static struct udma_timing {
@@ -57429,7 +63387,29 @@
  }
  
  static u8 pdcnew_cable_detect(ide_hwif_t *hwif)
-@@ -466,7 +446,7 @@ static unsigned int __devinit init_chipset_pdcnew(struct pci_dev *dev, const cha
+@@ -223,14 +203,17 @@ static u8 pdcnew_cable_detect(ide_hwif_t *hwif)
+ 		return ATA_CBL_PATA80;
+ }
+ 
+-static int pdcnew_quirkproc(ide_drive_t *drive)
++static void pdcnew_quirkproc(ide_drive_t *drive)
+ {
+ 	const char **list, *model = drive->id->model;
+ 
+ 	for (list = pdc_quirk_drives; *list != NULL; list++)
+-		if (strstr(model, *list) != NULL)
+-			return 2;
+-	return 0;
++		if (strstr(model, *list) != NULL) {
++			drive->quirk_list = 2;
++			return;
++		}
++
++	drive->quirk_list = 0;
+ }
+ 
+ static void pdcnew_reset(ide_drive_t *drive)
+@@ -466,7 +449,7 @@ static unsigned int __devinit init_chipset_pdcnew(struct pci_dev *dev, const cha
  static void __devinit init_hwif_pdc202new(ide_hwif_t *hwif)
  {
  	hwif->set_pio_mode = &pdcnew_set_pio_mode;
@@ -57439,7 +63419,7 @@
  	hwif->quirkproc = &pdcnew_quirkproc;
  	hwif->resetproc = &pdcnew_reset;
 diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c
-index e09742e..67b2781 100644
+index e09742e..3a1e081 100644
 --- a/drivers/ide/pci/pdc202xx_old.c
 +++ b/drivers/ide/pci/pdc202xx_old.c
 @@ -162,7 +162,7 @@ static u8 pdc202xx_old_cable_detect (ide_hwif_t *hwif)
@@ -57451,7 +63431,7 @@
  	u8 clock = inb(clock_reg);
  
  	outb(clock | (hwif->channel ? 0x08 : 0x02), clock_reg);
-@@ -170,7 +170,7 @@ static void pdc_old_enable_66MHz_clock(ide_hwif_t *hwif)
+@@ -170,20 +170,23 @@ static void pdc_old_enable_66MHz_clock(ide_hwif_t *hwif)
  
  static void pdc_old_disable_66MHz_clock(ide_hwif_t *hwif)
  {
@@ -57460,7 +63440,27 @@
  	u8 clock = inb(clock_reg);
  
  	outb(clock & ~(hwif->channel ? 0x08 : 0x02), clock_reg);
-@@ -193,7 +193,7 @@ static void pdc202xx_old_ide_dma_start(ide_drive_t *drive)
+ }
+ 
+-static int pdc202xx_quirkproc (ide_drive_t *drive)
++static void pdc202xx_quirkproc(ide_drive_t *drive)
+ {
+ 	const char **list, *model = drive->id->model;
+ 
+ 	for (list = pdc_quirk_drives; *list != NULL; list++)
+-		if (strstr(model, *list) != NULL)
+-			return 2;
+-	return 0;
++		if (strstr(model, *list) != NULL) {
++			drive->quirk_list = 2;
++			return;
++		}
++
++	drive->quirk_list = 0;
+ }
+ 
+ static void pdc202xx_old_ide_dma_start(ide_drive_t *drive)
+@@ -193,7 +196,7 @@ static void pdc202xx_old_ide_dma_start(ide_drive_t *drive)
  	if (drive->media != ide_disk || drive->addressing == 1) {
  		struct request *rq	= HWGROUP(drive)->rq;
  		ide_hwif_t *hwif	= HWIF(drive);
@@ -57469,7 +63469,7 @@
  		unsigned long atapi_reg	= high_16 + (hwif->channel ? 0x24 : 0x20);
  		u32 word_count	= 0;
  		u8 clock = inb(high_16 + 0x11);
-@@ -212,7 +212,7 @@ static int pdc202xx_old_ide_dma_end(ide_drive_t *drive)
+@@ -212,7 +215,7 @@ static int pdc202xx_old_ide_dma_end(ide_drive_t *drive)
  {
  	if (drive->media != ide_disk || drive->addressing == 1) {
  		ide_hwif_t *hwif	= HWIF(drive);
@@ -57478,7 +63478,7 @@
  		unsigned long atapi_reg	= high_16 + (hwif->channel ? 0x24 : 0x20);
  		u8 clock		= 0;
  
-@@ -228,7 +228,7 @@ static int pdc202xx_old_ide_dma_end(ide_drive_t *drive)
+@@ -228,7 +231,7 @@ static int pdc202xx_old_ide_dma_end(ide_drive_t *drive)
  static int pdc202xx_old_ide_dma_test_irq(ide_drive_t *drive)
  {
  	ide_hwif_t *hwif	= HWIF(drive);
@@ -57487,7 +63487,7 @@
  	u8 dma_stat		= inb(hwif->dma_status);
  	u8 sc1d			= inb(high_16 + 0x001d);
  
-@@ -271,7 +271,7 @@ static void pdc202xx_dma_timeout(ide_drive_t *drive)
+@@ -271,7 +274,7 @@ static void pdc202xx_dma_timeout(ide_drive_t *drive)
  
  static void pdc202xx_reset_host (ide_hwif_t *hwif)
  {
@@ -57496,7 +63496,7 @@
  	u8 udma_speed_flag	= inb(high_16 | 0x001f);
  
  	outb(udma_speed_flag | 0x10, high_16 | 0x001f);
-@@ -375,6 +375,11 @@ static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
+@@ -375,6 +378,11 @@ static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
  	}
  }
  
@@ -57508,7 +63508,7 @@
  #define DECLARE_PDC2026X_DEV(name_str, udma, extra_flags) \
  	{ \
  		.name		= name_str, \
-@@ -382,9 +387,7 @@ static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
+@@ -382,9 +390,7 @@ static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
  		.init_hwif	= init_hwif_pdc202xx, \
  		.init_dma	= init_dma_pdc202xx, \
  		.extra		= 48, \
@@ -57519,7 +63519,7 @@
  		.pio_mask	= ATA_PIO4, \
  		.mwdma_mask	= ATA_MWDMA2, \
  		.udma_mask	= udma, \
-@@ -397,8 +400,7 @@ static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
+@@ -397,8 +403,7 @@ static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
  		.init_hwif	= init_hwif_pdc202xx,
  		.init_dma	= init_dma_pdc202xx,
  		.extra		= 16,
@@ -57559,7 +63559,7 @@
  			pci_write_config_byte(dev, 0x48, reg48 | u_flag);
  		if (speed == XFER_UDMA_5) {
 diff --git a/drivers/ide/pci/sc1200.c b/drivers/ide/pci/sc1200.c
-index 707d5ff..fef20bd 100644
+index 707d5ff..32fdf53 100644
 --- a/drivers/ide/pci/sc1200.c
 +++ b/drivers/ide/pci/sc1200.c
 @@ -135,59 +135,29 @@ static void sc1200_set_dma_mode(ide_drive_t *drive, const u8 mode)
@@ -57639,6 +63639,19 @@
  
  	if (unit == 0) {			/* are we configuring drive0? */
  		pci_read_config_dword(hwif->pci_dev, basereg+4, &reg);
+@@ -250,9 +220,9 @@ static void sc1200_set_pio_mode(ide_drive_t *drive, const u8 pio)
+ 	}
+ 	if (mode != -1) {
+ 		printk("SC1200: %s: changing (U)DMA mode\n", drive->name);
+-		hwif->dma_off_quietly(drive);
+-		if (ide_set_dma_mode(drive, mode) == 0)
+-			hwif->dma_host_on(drive);
++		ide_dma_off_quietly(drive);
++		if (ide_set_dma_mode(drive, mode) == 0 && drive->using_dma)
++			hwif->dma_host_set(drive, 1);
+ 		return;
+ 	}
+ 
 @@ -260,66 +230,39 @@ static void sc1200_set_pio_mode(ide_drive_t *drive, const u8 pio)
  }
  
@@ -57801,10 +63814,42 @@
  	jcactsel = JCACTSELtbl[offset][idx];
  	if (is_slave) {
 diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c
-index a728031..e9bd269 100644
+index a728031..877c09b 100644
 --- a/drivers/ide/pci/serverworks.c
 +++ b/drivers/ide/pci/serverworks.c
-@@ -366,12 +366,17 @@ static void __devinit init_hwif_svwks (ide_hwif_t *hwif)
+@@ -164,25 +164,12 @@ static void svwks_set_dma_mode(ide_drive_t *drive, const u8 speed)
+ 	ultra_timing	&= ~(0x0F << (4*unit));
+ 	ultra_enable	&= ~(0x01 << drive->dn);
+ 
+-	switch(speed) {
+-		case XFER_MW_DMA_2:
+-		case XFER_MW_DMA_1:
+-		case XFER_MW_DMA_0:
+-			dma_timing |= dma_modes[speed - XFER_MW_DMA_0];
+-			break;
+-
+-		case XFER_UDMA_5:
+-		case XFER_UDMA_4:
+-		case XFER_UDMA_3:
+-		case XFER_UDMA_2:
+-		case XFER_UDMA_1:
+-		case XFER_UDMA_0:
+-			dma_timing   |= dma_modes[2];
+-			ultra_timing |= ((udma_modes[speed - XFER_UDMA_0]) << (4*unit));
+-			ultra_enable |= (0x01 << drive->dn);
+-		default:
+-			break;
+-	}
++	if (speed >= XFER_UDMA_0) {
++		dma_timing   |= dma_modes[2];
++		ultra_timing |= (udma_modes[speed - XFER_UDMA_0] << (4 * unit));
++		ultra_enable |= (0x01 << drive->dn);
++	} else if (speed >= XFER_MW_DMA_0)
++		dma_timing   |= dma_modes[speed - XFER_MW_DMA_0];
+ 
+ 	pci_write_config_byte(dev, drive_pci2[drive->dn], dma_timing);
+ 	pci_write_config_byte(dev, (0x56|hwif->channel), ultra_timing);
+@@ -366,12 +353,17 @@ static void __devinit init_hwif_svwks (ide_hwif_t *hwif)
  	}
  }
  
@@ -57823,7 +63868,7 @@
  		.pio_mask	= ATA_PIO4,
  		.mwdma_mask	= ATA_MWDMA2,
  		.udma_mask	= 0x00, /* UDMA is problematic on OSB4 */
-@@ -379,7 +384,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
+@@ -379,7 +371,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
  		.name		= "SvrWks CSB5",
  		.init_chipset	= init_chipset_svwks,
  		.init_hwif	= init_hwif_svwks,
@@ -57832,7 +63877,7 @@
  		.pio_mask	= ATA_PIO4,
  		.mwdma_mask	= ATA_MWDMA2,
  		.udma_mask	= ATA_UDMA5,
-@@ -387,7 +392,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
+@@ -387,7 +379,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
  		.name		= "SvrWks CSB6",
  		.init_chipset	= init_chipset_svwks,
  		.init_hwif	= init_hwif_svwks,
@@ -57841,7 +63886,7 @@
  		.pio_mask	= ATA_PIO4,
  		.mwdma_mask	= ATA_MWDMA2,
  		.udma_mask	= ATA_UDMA5,
-@@ -395,8 +400,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
+@@ -395,8 +387,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
  		.name		= "SvrWks CSB6",
  		.init_chipset	= init_chipset_svwks,
  		.init_hwif	= init_hwif_svwks,
@@ -57851,7 +63896,7 @@
  		.pio_mask	= ATA_PIO4,
  		.mwdma_mask	= ATA_MWDMA2,
  		.udma_mask	= ATA_UDMA5,
-@@ -404,8 +408,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
+@@ -404,8 +395,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
  		.name		= "SvrWks HT1000",
  		.init_chipset	= init_chipset_svwks,
  		.init_hwif	= init_hwif_svwks,
@@ -57862,10 +63907,49 @@
  		.mwdma_mask	= ATA_MWDMA2,
  		.udma_mask	= ATA_UDMA5,
 diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
-index de820aa..7e9dade 100644
+index de820aa..9e0be7d 100644
 --- a/drivers/ide/pci/sgiioc4.c
 +++ b/drivers/ide/pci/sgiioc4.c
-@@ -582,7 +582,6 @@ ide_init_sgiioc4(ide_hwif_t * hwif)
+@@ -277,21 +277,6 @@ sgiioc4_ide_dma_end(ide_drive_t * drive)
+ 	return dma_stat;
+ }
+ 
+-static int
+-sgiioc4_ide_dma_on(ide_drive_t * drive)
+-{
+-	drive->using_dma = 1;
+-
+-	return 0;
+-}
+-
+-static void sgiioc4_dma_off_quietly(ide_drive_t *drive)
+-{
+-	drive->using_dma = 0;
+-
+-	drive->hwif->dma_host_off(drive);
+-}
+-
+ static void sgiioc4_set_dma_mode(ide_drive_t *drive, const u8 speed)
+ {
+ }
+@@ -303,13 +288,10 @@ sgiioc4_ide_dma_test_irq(ide_drive_t * drive)
+ 	return sgiioc4_checkirq(HWIF(drive));
+ }
+ 
+-static void sgiioc4_dma_host_on(ide_drive_t * drive)
+-{
+-}
+-
+-static void sgiioc4_dma_host_off(ide_drive_t * drive)
++static void sgiioc4_dma_host_set(ide_drive_t *drive, int on)
+ {
+-	sgiioc4_clearirq(drive);
++	if (!on)
++		sgiioc4_clearirq(drive);
+ }
+ 
+ static void
+@@ -582,7 +564,6 @@ ide_init_sgiioc4(ide_hwif_t * hwif)
  	hwif->pre_reset = NULL;	/* No HBA specific pre_set needed */
  	hwif->resetproc = &sgiioc4_resetproc;/* Reset DMA engine,
  						clear interrupts */
@@ -57873,8 +63957,61 @@
  	hwif->maskproc = &sgiioc4_maskproc;	/* Mask on/off NIEN register */
  	hwif->quirkproc = NULL;
  	hwif->busproc = NULL;
+@@ -594,14 +575,11 @@ ide_init_sgiioc4(ide_hwif_t * hwif)
+ 
+ 	hwif->mwdma_mask = ATA_MWDMA2_ONLY;
+ 
++	hwif->dma_host_set = &sgiioc4_dma_host_set;
+ 	hwif->dma_setup = &sgiioc4_ide_dma_setup;
+ 	hwif->dma_start = &sgiioc4_ide_dma_start;
+ 	hwif->ide_dma_end = &sgiioc4_ide_dma_end;
+-	hwif->ide_dma_on = &sgiioc4_ide_dma_on;
+-	hwif->dma_off_quietly = &sgiioc4_dma_off_quietly;
+ 	hwif->ide_dma_test_irq = &sgiioc4_ide_dma_test_irq;
+-	hwif->dma_host_on = &sgiioc4_dma_host_on;
+-	hwif->dma_host_off = &sgiioc4_dma_host_off;
+ 	hwif->dma_lost_irq = &sgiioc4_dma_lost_irq;
+ 	hwif->dma_timeout = &ide_dma_timeout;
+ }
+@@ -615,6 +593,7 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
+ 	ide_hwif_t *hwif;
+ 	int h;
+ 	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
++	hw_regs_t hw;
+ 
+ 	/*
+ 	 * Find an empty HWIF; if none available, return -ENOMEM.
+@@ -654,21 +633,16 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
+ 		return -ENOMEM;
+ 	}
+ 
+-	if (hwif->io_ports[IDE_DATA_OFFSET] != cmd_base) {
+-		hw_regs_t hw;
+-
+-		/* Initialize the IO registers */
+-		memset(&hw, 0, sizeof(hw));
+-		sgiioc4_init_hwif_ports(&hw, cmd_base, ctl, irqport);
+-		memcpy(hwif->io_ports, hw.io_ports, sizeof(hwif->io_ports));
+-		hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET];
+-	}
++	/* Initialize the IO registers */
++	memset(&hw, 0, sizeof(hw));
++	sgiioc4_init_hwif_ports(&hw, cmd_base, ctl, irqport);
++	hw.irq = dev->irq;
++	hw.chipset = ide_pci;
++	hw.dev = &dev->dev;
++	ide_init_port_hw(hwif, &hw);
+ 
+-	hwif->irq = dev->irq;
+-	hwif->chipset = ide_pci;
+ 	hwif->pci_dev = dev;
+ 	hwif->channel = 0;	/* Single Channel chip */
+-	hwif->gendev.parent = &dev->dev;/* setup proper ancestral information */
+ 
+ 	/* The IOC4 uses MMIO rather than Port IO. */
+ 	default_hwif_mmiops(hwif);
 diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
-index 5709c25..7b45eaf 100644
+index 5709c25..908f37b 100644
 --- a/drivers/ide/pci/siimage.c
 +++ b/drivers/ide/pci/siimage.c
 @@ -278,27 +278,14 @@ static void sil_set_dma_mode(ide_drive_t *drive, const u8 speed)
@@ -57913,6 +64050,57 @@
  	}
  
  	if (hwif->mmio) {
+@@ -726,9 +713,6 @@ static int is_dev_seagate_sata(ide_drive_t *drive)
+ 	const char *s = &drive->id->model[0];
+ 	unsigned len;
+ 
+-	if (!drive->present)
+-		return 0;
+-
+ 	len = strnlen(s, sizeof(drive->id->model));
+ 
+ 	if ((len > 4) && (!memcmp(s, "ST", 2))) {
+@@ -743,18 +727,20 @@ static int is_dev_seagate_sata(ide_drive_t *drive)
+ }
+ 
+ /**
+- *	siimage_fixup		-	post probe fixups
+- *	@hwif: interface to fix up
++ *	sil_quirkproc		-	post probe fixups
++ *	@drive: drive
+  *
+  *	Called after drive probe we use this to decide whether the
+  *	Seagate fixup must be applied. This used to be in init_iops but
+  *	that can occur before we know what drives are present.
+  */
+ 
+-static void __devinit siimage_fixup(ide_hwif_t *hwif)
++static void __devinit sil_quirkproc(ide_drive_t *drive)
+ {
++	ide_hwif_t *hwif = drive->hwif;
++
+ 	/* Try and raise the rqsize */
+-	if (!is_sata(hwif) || !is_dev_seagate_sata(&hwif->drives[0]))
++	if (!is_sata(hwif) || !is_dev_seagate_sata(drive))
+ 		hwif->rqsize = 128;
+ }
+ 
+@@ -817,6 +803,7 @@ static void __devinit init_hwif_siimage(ide_hwif_t *hwif)
+ 
+ 	hwif->set_pio_mode = &sil_set_pio_mode;
+ 	hwif->set_dma_mode = &sil_set_dma_mode;
++	hwif->quirkproc = &sil_quirkproc;
+ 
+ 	if (sata) {
+ 		static int first = 1;
+@@ -855,7 +842,6 @@ static void __devinit init_hwif_siimage(ide_hwif_t *hwif)
+ 		.init_chipset	= init_chipset_siimage,	\
+ 		.init_iops	= init_iops_siimage,	\
+ 		.init_hwif	= init_hwif_siimage,	\
+-		.fixup		= siimage_fixup,	\
+ 		.host_flags	= IDE_HFLAG_BOOTABLE,	\
+ 		.pio_mask	= ATA_PIO4,		\
+ 		.mwdma_mask	= ATA_MWDMA2,		\
 diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c
 index d90b429..85d3699 100644
 --- a/drivers/ide/pci/sis5513.c
@@ -58026,10 +64214,35 @@
  
  static u8 sis5513_ata133_udma_filter(ide_drive_t *drive)
 diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/pci/sl82c105.c
-index 147d783..069f104 100644
+index 147d783..c7a125b 100644
 --- a/drivers/ide/pci/sl82c105.c
 +++ b/drivers/ide/pci/sl82c105.c
-@@ -115,32 +115,24 @@ static void sl82c105_set_dma_mode(ide_drive_t *drive, const u8 speed)
+@@ -13,6 +13,7 @@
+  *  -- Benjamin Herrenschmidt (01/11/03) benh at kernel.crashing.org
+  *
+  * Copyright (C) 2006-2007 MontaVista Software, Inc. <source at mvista.com>
++ * Copyright (C)      2007 Bartlomiej Zolnierkiewicz
+  */
+ 
+ #include <linux/types.h>
+@@ -90,14 +91,8 @@ static void sl82c105_set_pio_mode(ide_drive_t *drive, const u8 pio)
+ 	drive->drive_data &= 0xffff0000;
+ 	drive->drive_data |= drv_ctrl;
+ 
+-	if (!drive->using_dma) {
+-		/*
+-		 * If we are actually using MW DMA, then we can not
+-		 * reprogram the interface drive control register.
+-		 */
+-		pci_write_config_word(dev, reg,  drv_ctrl);
+-		pci_read_config_word (dev, reg, &drv_ctrl);
+-	}
++	pci_write_config_word(dev, reg,  drv_ctrl);
++	pci_read_config_word (dev, reg, &drv_ctrl);
+ 
+ 	printk(KERN_DEBUG "%s: selected %s (%dns) (%04X)\n", drive->name,
+ 			  ide_xfer_verbose(pio + XFER_PIO_0),
+@@ -115,33 +110,14 @@ static void sl82c105_set_dma_mode(ide_drive_t *drive, const u8 speed)
   	DBG(("sl82c105_tune_chipset(drive:%s, speed:%s)\n",
  	     drive->name, ide_xfer_verbose(speed)));
  
@@ -58046,13 +64259,7 @@
 -		 */
 -		drive->drive_data &= 0x0000ffff;
 -		drive->drive_data |= (unsigned long)drv_ctrl << 16;
-+	/*
-+	 * Store the DMA timings so that we can actually program
-+	 * them when DMA will be turned on...
-+	 */
-+	drive->drive_data &= 0x0000ffff;
-+	drive->drive_data |= (unsigned long)drv_ctrl << 16;
- 
+-
 -		/*
 -		 * If we are already using DMA, we just reprogram
 -		 * the drive control register.
@@ -58066,18 +64273,138 @@
 -		break;
 -	default:
 -		return;
+-	}
 +	/*
-+	 * If we are already using DMA, we just reprogram
-+	 * the drive control register.
++	 * Store the DMA timings so that we can actually program
++	 * them when DMA will be turned on...
 +	 */
-+	if (drive->using_dma) {
-+		struct pci_dev *dev	= HWIF(drive)->pci_dev;
-+		int reg 		= 0x44 + drive->dn * 4;
++	drive->drive_data &= 0x0000ffff;
++	drive->drive_data |= (unsigned long)drv_ctrl << 16;
+ }
+ 
+ /*
+@@ -209,6 +185,11 @@ static void sl82c105_dma_start(ide_drive_t *drive)
+ {
+ 	ide_hwif_t *hwif	= HWIF(drive);
+ 	struct pci_dev *dev	= hwif->pci_dev;
++	int reg 		= 0x44 + drive->dn * 4;
 +
-+		pci_write_config_word(dev, reg, drv_ctrl);
- 	}
++	DBG(("%s(drive:%s)\n", __FUNCTION__, drive->name));
++
++	pci_write_config_word(dev, reg, drive->drive_data >> 16);
+ 
+ 	sl82c105_reset_host(dev);
+ 	ide_dma_start(drive);
+@@ -222,64 +203,24 @@ static void sl82c105_dma_timeout(ide_drive_t *drive)
+ 	ide_dma_timeout(drive);
+ }
+ 
+-static int sl82c105_ide_dma_on(ide_drive_t *drive)
+-{
+-	struct pci_dev *dev	= HWIF(drive)->pci_dev;
+-	int rc, reg 		= 0x44 + drive->dn * 4;
+-
+-	DBG(("sl82c105_ide_dma_on(drive:%s)\n", drive->name));
+-
+-	rc = __ide_dma_on(drive);
+-	if (rc == 0) {
+-		pci_write_config_word(dev, reg, drive->drive_data >> 16);
+-
+-		printk(KERN_INFO "%s: DMA enabled\n", drive->name);
+-	}
+-	return rc;
+-}
+-
+-static void sl82c105_dma_off_quietly(ide_drive_t *drive)
++static int sl82c105_dma_end(ide_drive_t *drive)
+ {
+ 	struct pci_dev *dev	= HWIF(drive)->pci_dev;
+ 	int reg 		= 0x44 + drive->dn * 4;
++	int ret;
+ 
+-	DBG(("sl82c105_dma_off_quietly(drive:%s)\n", drive->name));
++	DBG(("%s(drive:%s)\n", __FUNCTION__, drive->name));
+ 
+-	pci_write_config_word(dev, reg, drive->drive_data);
++	ret = __ide_dma_end(drive);
+ 
+-	ide_dma_off_quietly(drive);
+-}
++	pci_write_config_word(dev, reg, drive->drive_data);
+ 
+-/*
+- * Ok, that is nasty, but we must make sure the DMA timings
+- * won't be used for a PIO access. The solution here is
+- * to make sure the 16 bits mode is diabled on the channel
+- * when DMA is enabled, thus causing the chip to use PIO0
+- * timings for those operations.
+- */
+-static void sl82c105_selectproc(ide_drive_t *drive)
+-{
+-	ide_hwif_t *hwif	= HWIF(drive);
+-	struct pci_dev *dev	= hwif->pci_dev;
+-	u32 val, old, mask;
+-
+-	//DBG(("sl82c105_selectproc(drive:%s)\n", drive->name));
+-
+-	mask = hwif->channel ? CTRL_P1F16 : CTRL_P0F16;
+-	old = val = (u32)pci_get_drvdata(dev);
+-	if (drive->using_dma)
+-		val &= ~mask;
+-	else
+-		val |= mask;
+-	if (old != val) {
+-		pci_write_config_dword(dev, 0x40, val);	
+-		pci_set_drvdata(dev, (void *)val);
+-	}
++	return ret;
+ }
+ 
+ /*
+  * ATA reset will clear the 16 bits mode in the control
+- * register, we need to update our cache
++ * register, we need to reprogram it
+  */
+ static void sl82c105_resetproc(ide_drive_t *drive)
+ {
+@@ -289,7 +230,8 @@ static void sl82c105_resetproc(ide_drive_t *drive)
+ 	DBG(("sl82c105_resetproc(drive:%s)\n", drive->name));
+ 
+ 	pci_read_config_dword(dev, 0x40, &val);
+-	pci_set_drvdata(dev, (void *)val);
++	val |= (CTRL_P1F16 | CTRL_P0F16);
++	pci_write_config_dword(dev, 0x40, val);
+ }
+ 
+ /*
+@@ -342,7 +284,6 @@ static unsigned int __devinit init_chipset_sl82c105(struct pci_dev *dev, const c
+ 	pci_read_config_dword(dev, 0x40, &val);
+ 	val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16;
+ 	pci_write_config_dword(dev, 0x40, val);
+-	pci_set_drvdata(dev, (void *)val);
+ 
+ 	return dev->irq;
  }
+@@ -358,7 +299,6 @@ static void __devinit init_hwif_sl82c105(ide_hwif_t *hwif)
+ 
+ 	hwif->set_pio_mode	= &sl82c105_set_pio_mode;
+ 	hwif->set_dma_mode	= &sl82c105_set_dma_mode;
+-	hwif->selectproc	= &sl82c105_selectproc;
+ 	hwif->resetproc 	= &sl82c105_resetproc;
+ 
+ 	if (!hwif->dma_base)
+@@ -377,10 +317,9 @@ static void __devinit init_hwif_sl82c105(ide_hwif_t *hwif)
+ 
+ 	hwif->mwdma_mask = ATA_MWDMA2;
  
+-	hwif->ide_dma_on		= &sl82c105_ide_dma_on;
+-	hwif->dma_off_quietly		= &sl82c105_dma_off_quietly;
+ 	hwif->dma_lost_irq		= &sl82c105_dma_lost_irq;
+ 	hwif->dma_start			= &sl82c105_dma_start;
++	hwif->ide_dma_end		= &sl82c105_dma_end;
+ 	hwif->dma_timeout		= &sl82c105_dma_timeout;
+ 
+ 	if (hwif->mate)
 diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/pci/slc90e66.c
 index eb4445b..dbbb468 100644
 --- a/drivers/ide/pci/slc90e66.c
@@ -58131,6 +64458,33 @@
  	}
  
  	triflex_timings &= ~(0xFFFF << (16 * unit));
+diff --git a/drivers/ide/pci/trm290.c b/drivers/ide/pci/trm290.c
+index 0151d7f..04cd893 100644
+--- a/drivers/ide/pci/trm290.c
++++ b/drivers/ide/pci/trm290.c
+@@ -241,11 +241,7 @@ static int trm290_ide_dma_test_irq (ide_drive_t *drive)
+ 	return (status == 0x00ff);
+ }
+ 
+-static void trm290_dma_host_on(ide_drive_t *drive)
+-{
+-}
+-
+-static void trm290_dma_host_off(ide_drive_t *drive)
++static void trm290_dma_host_set(ide_drive_t *drive, int on)
+ {
+ }
+ 
+@@ -289,8 +285,7 @@ static void __devinit init_hwif_trm290(ide_hwif_t *hwif)
+ 
+ 	ide_setup_dma(hwif, (hwif->config_data + 4) ^ (hwif->channel ? 0x0080 : 0x0000), 3);
+ 
+-	hwif->dma_host_off	= &trm290_dma_host_off;
+-	hwif->dma_host_on	= &trm290_dma_host_on;
++	hwif->dma_host_set	= &trm290_dma_host_set;
+ 	hwif->dma_setup 	= &trm290_dma_setup;
+ 	hwif->dma_exec_cmd	= &trm290_dma_exec_cmd;
+ 	hwif->dma_start 	= &trm290_dma_start;
 diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
 index a0d3c16..4b32c90 100644
 --- a/drivers/ide/pci/via82cxxx.c
@@ -58143,8 +64497,43 @@
  			  IDE_HFLAG_POST_SET_MODE |
  			  IDE_HFLAG_IO_32BIT |
  			  IDE_HFLAG_BOOTABLE,
+diff --git a/drivers/ide/ppc/Makefile b/drivers/ide/ppc/Makefile
+new file mode 100644
+index 0000000..65af584
+--- /dev/null
++++ b/drivers/ide/ppc/Makefile
+@@ -0,0 +1,3 @@
++
++obj-$(CONFIG_BLK_DEV_IDE_PMAC)		+= pmac.o
++obj-$(CONFIG_BLK_DEV_MPC8xx_IDE)	+= mpc8xx.o
+diff --git a/drivers/ide/ppc/mpc8xx.c b/drivers/ide/ppc/mpc8xx.c
+index 5f0da35..3fd5d45 100644
+--- a/drivers/ide/ppc/mpc8xx.c
++++ b/drivers/ide/ppc/mpc8xx.c
+@@ -838,3 +838,21 @@ void m8xx_ide_init(void)
+ 	ppc_ide_md.default_io_base      = m8xx_ide_default_io_base;
+ 	ppc_ide_md.ide_init_hwif        = m8xx_ide_init_hwif_ports;
+ }
++
++static int __init mpc8xx_ide_probe(void)
++{
++	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
++
++#ifdef IDE0_BASE_OFFSET
++	idx[0] = 0;
++#ifdef IDE1_BASE_OFFSET
++	idx[1] = 1;
++#endif
++#endif
++
++	ide_device_add(idx);
++
++	return 0;
++}
++
++module_init(mpc8xx_ide_probe);
 diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
-index 7f7a598..3dce800 100644
+index 7f7a598..736d12c 100644
 --- a/drivers/ide/ppc/pmac.c
 +++ b/drivers/ide/ppc/pmac.c
 @@ -438,13 +438,8 @@ pmac_ide_init_hwif_ports(hw_regs_t *hw,
@@ -58214,15 +64603,355 @@
  	if (ret)
  		return;
  
-diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
-index 90dc75b..511e432 100644
---- a/drivers/ieee1394/nodemgr.c
-+++ b/drivers/ieee1394/nodemgr.c
-@@ -727,33 +727,31 @@ static int nodemgr_bus_match(struct device * dev, struct device_driver * drv)
- 
- static DEFINE_MUTEX(nodemgr_serialize_remove_uds);
+@@ -1035,12 +1012,11 @@ pmac_ide_do_resume(ide_hwif_t *hwif)
+  * rare machines unfortunately, but it's better this way.
+  */
+ static int
+-pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
++pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif, hw_regs_t *hw)
+ {
+ 	struct device_node *np = pmif->node;
+ 	const int *bidp;
+ 	u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+-	hw_regs_t hw;
+ 
+ 	pmif->cable_80 = 0;
+ 	pmif->broken_dma = pmif->broken_dma_warn = 0;
+@@ -1126,11 +1102,9 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
+ 	/* Tell common code _not_ to mess with resources */
+ 	hwif->mmio = 1;
+ 	hwif->hwif_data = pmif;
+-	memset(&hw, 0, sizeof(hw));
+-	pmac_ide_init_hwif_ports(&hw, pmif->regbase, 0, &hwif->irq);
+-	memcpy(hwif->io_ports, hw.io_ports, sizeof(hwif->io_ports));
+-	hwif->chipset = ide_pmac;
+-	hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET] || pmif->mediabay;
++	hw->chipset = ide_pmac;
++	ide_init_port_hw(hwif, hw);
++	hwif->noprobe = pmif->mediabay;
+ 	hwif->hold = pmif->mediabay;
+ 	hwif->cbl = pmif->cable_80 ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
+ 	hwif->drives[0].unmask = 1;
+@@ -1159,8 +1133,6 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
+ 		hwif->noprobe = 0;
+ #endif /* CONFIG_PMAC_MEDIABAY */
  
-+static int __match_ne(struct device *dev, void *data)
+-	hwif->sg_max_nents = MAX_DCMDS;
+-
+ #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
+ 	/* has a DBDMA controller channel */
+ 	if (pmif->dma_regs)
+@@ -1186,6 +1158,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
+ 	ide_hwif_t *hwif;
+ 	pmac_ide_hwif_t *pmif;
+ 	int i, rc;
++	hw_regs_t hw;
+ 
+ 	i = 0;
+ 	while (i < MAX_HWIFS && (ide_hwifs[i].io_ports[IDE_DATA_OFFSET] != 0
+@@ -1228,7 +1201,6 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
+ 	regbase = (unsigned long) base;
+ 
+ 	hwif->pci_dev = mdev->bus->pdev;
+-	hwif->gendev.parent = &mdev->ofdev.dev;
+ 
+ 	pmif->mdev = mdev;
+ 	pmif->node = mdev->ofdev.node;
+@@ -1246,7 +1218,12 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
+ #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
+ 	dev_set_drvdata(&mdev->ofdev.dev, hwif);
+ 
+-	rc = pmac_ide_setup_device(pmif, hwif);
++	memset(&hw, 0, sizeof(hw));
++	pmac_ide_init_hwif_ports(&hw, pmif->regbase, 0, NULL);
++	hw.irq = irq;
++	hw.dev = &mdev->ofdev.dev;
++
++	rc = pmac_ide_setup_device(pmif, hwif, &hw);
+ 	if (rc != 0) {
+ 		/* The inteface is released to the common IDE layer */
+ 		dev_set_drvdata(&mdev->ofdev.dev, NULL);
+@@ -1305,6 +1282,7 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	void __iomem *base;
+ 	unsigned long rbase, rlen;
+ 	int i, rc;
++	hw_regs_t hw;
+ 
+ 	np = pci_device_to_OF_node(pdev);
+ 	if (np == NULL) {
+@@ -1338,7 +1316,6 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	}
+ 
+ 	hwif->pci_dev = pdev;
+-	hwif->gendev.parent = &pdev->dev;
+ 	pmif->mdev = NULL;
+ 	pmif->node = np;
+ 
+@@ -1355,7 +1332,12 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
+ 
+ 	pci_set_drvdata(pdev, hwif);
+ 
+-	rc = pmac_ide_setup_device(pmif, hwif);
++	memset(&hw, 0, sizeof(hw));
++	pmac_ide_init_hwif_ports(&hw, pmif->regbase, 0, NULL);
++	hw.irq = pdev->irq;
++	hw.dev = &pdev->dev;
++
++	rc = pmac_ide_setup_device(pmif, hwif, &hw);
+ 	if (rc != 0) {
+ 		/* The inteface is released to the common IDE layer */
+ 		pci_set_drvdata(pdev, NULL);
+@@ -1721,11 +1703,7 @@ pmac_ide_dma_test_irq (ide_drive_t *drive)
+ 	return 1;
+ }
+ 
+-static void pmac_ide_dma_host_off(ide_drive_t *drive)
+-{
+-}
+-
+-static void pmac_ide_dma_host_on(ide_drive_t *drive)
++static void pmac_ide_dma_host_set(ide_drive_t *drive, int on)
+ {
+ }
+ 
+@@ -1771,15 +1749,14 @@ pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
+ 		return;
+ 	}
+ 
+-	hwif->dma_off_quietly = &ide_dma_off_quietly;
+-	hwif->ide_dma_on = &__ide_dma_on;
++	hwif->sg_max_nents = MAX_DCMDS;
++
++	hwif->dma_host_set = &pmac_ide_dma_host_set;
+ 	hwif->dma_setup = &pmac_ide_dma_setup;
+ 	hwif->dma_exec_cmd = &pmac_ide_dma_exec_cmd;
+ 	hwif->dma_start = &pmac_ide_dma_start;
+ 	hwif->ide_dma_end = &pmac_ide_dma_end;
+ 	hwif->ide_dma_test_irq = &pmac_ide_dma_test_irq;
+-	hwif->dma_host_off = &pmac_ide_dma_host_off;
+-	hwif->dma_host_on = &pmac_ide_dma_host_on;
+ 	hwif->dma_timeout = &ide_dma_timeout;
+ 	hwif->dma_lost_irq = &pmac_ide_dma_lost_irq;
+ 
+@@ -1809,3 +1786,5 @@ pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
+ }
+ 
+ #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
++
++module_init(pmac_ide_probe);
+diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
+index d2cd5a3..676c66e 100644
+--- a/drivers/ide/setup-pci.c
++++ b/drivers/ide/setup-pci.c
+@@ -165,13 +165,17 @@ static unsigned long ide_get_or_set_dma_base(const struct ide_port_info *d, ide_
+ 
+ 		dma_base = pci_resource_start(dev, baridx);
+ 
+-		if (dma_base == 0)
++		if (dma_base == 0) {
+ 			printk(KERN_ERR "%s: DMA base is invalid\n", d->name);
++			return 0;
++		}
+ 	}
+ 
+-	if ((d->host_flags & IDE_HFLAG_CS5520) == 0 && dma_base) {
++	if (hwif->channel)
++		dma_base += 8;
++
++	if ((d->host_flags & IDE_HFLAG_CS5520) == 0) {
+ 		u8 simplex_stat = 0;
+-		dma_base += hwif->channel ? 8 : 0;
+ 
+ 		switch(dev->device) {
+ 			case PCI_DEVICE_ID_AL_M5219:
+@@ -359,6 +363,8 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev, const struct ide_port
+ 	unsigned long ctl = 0, base = 0;
+ 	ide_hwif_t *hwif;
+ 	u8 bootable = (d->host_flags & IDE_HFLAG_BOOTABLE) ? 1 : 0;
++	u8 oldnoprobe = 0;
++	struct hw_regs_s hw;
+ 
+ 	if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) {
+ 		/*  Possibly we should fail if these checks report true */
+@@ -381,26 +387,25 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev, const struct ide_port
+ 	}
+ 	if ((hwif = ide_match_hwif(base, bootable, d->name)) == NULL)
+ 		return NULL;	/* no room in ide_hwifs[] */
+-	if (hwif->io_ports[IDE_DATA_OFFSET] != base ||
+-	    hwif->io_ports[IDE_CONTROL_OFFSET] != (ctl | 2)) {
+-		hw_regs_t hw;
+-
+-		memset(&hw, 0, sizeof(hw));
+-#ifndef CONFIG_IDE_ARCH_OBSOLETE_INIT
+-		ide_std_init_ports(&hw, base, ctl | 2);
+-#else
+-		ide_init_hwif_ports(&hw, base, ctl | 2, NULL);
+-#endif
+-		memcpy(hwif->io_ports, hw.io_ports, sizeof(hwif->io_ports));
+-		hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET];
+-	}
+-	hwif->chipset = d->chipset ? d->chipset : ide_pci;
++
++	memset(&hw, 0, sizeof(hw));
++	hw.irq = hwif->irq ? hwif->irq : irq;
++	hw.dev = &dev->dev;
++	hw.chipset = d->chipset ? d->chipset : ide_pci;
++	ide_std_init_ports(&hw, base, ctl | 2);
++
++	if (hwif->io_ports[IDE_DATA_OFFSET] == base &&
++	    hwif->io_ports[IDE_CONTROL_OFFSET] == (ctl | 2))
++		oldnoprobe = hwif->noprobe;
++
++	ide_init_port_hw(hwif, &hw);
++
++	hwif->noprobe = oldnoprobe;
++
+ 	hwif->pci_dev = dev;
+ 	hwif->cds = d;
+ 	hwif->channel = port;
+ 
+-	if (!hwif->irq)
+-		hwif->irq = irq;
+ 	if (mate) {
+ 		hwif->mate = mate;
+ 		mate->mate = hwif;
+@@ -535,12 +540,8 @@ void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, int
+ 		if ((hwif = ide_hwif_configure(dev, d, mate, port, pciirq)) == NULL)
+ 			continue;
+ 
+-		/* setup proper ancestral information */
+-		hwif->gendev.parent = &dev->dev;
+-
+ 		*(idx + port) = hwif->index;
+ 
+-		
+ 		if (d->init_iops)
+ 			d->init_iops(hwif);
+ 
+@@ -551,8 +552,6 @@ void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, int
+ 		    (d->host_flags & IDE_HFLAG_FORCE_LEGACY_IRQS))
+ 			hwif->irq = port ? 15 : 14;
+ 
+-		hwif->fixup = d->fixup;
+-
+ 		hwif->host_flags = d->host_flags;
+ 		hwif->pio_mask = d->pio_mask;
+ 
+@@ -699,105 +698,3 @@ out:
+ }
+ 
+ EXPORT_SYMBOL_GPL(ide_setup_pci_devices);
+-
+-#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
+-/*
+- *	Module interfaces
+- */
+-
+-static int pre_init = 1;		/* Before first ordered IDE scan */
+-static LIST_HEAD(ide_pci_drivers);
+-
+-/*
+- *	__ide_pci_register_driver	-	attach IDE driver
+- *	@driver: pci driver
+- *	@module: owner module of the driver
+- *
+- *	Registers a driver with the IDE layer. The IDE layer arranges that
+- *	boot time setup is done in the expected device order and then
+- *	hands the controllers off to the core PCI code to do the rest of
+- *	the work.
+- *
+- *	Returns are the same as for pci_register_driver
+- */
+-
+-int __ide_pci_register_driver(struct pci_driver *driver, struct module *module,
+-			      const char *mod_name)
+-{
+-	if (!pre_init)
+-		return __pci_register_driver(driver, module, mod_name);
+-	driver->driver.owner = module;
+-	list_add_tail(&driver->node, &ide_pci_drivers);
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(__ide_pci_register_driver);
+-
+-/**
+- *	ide_scan_pcidev		-	find an IDE driver for a device
+- *	@dev: PCI device to check
+- *
+- *	Look for an IDE driver to handle the device we are considering.
+- *	This is only used during boot up to get the ordering correct. After
+- *	boot up the pci layer takes over the job.
+- */
+-
+-static int __init ide_scan_pcidev(struct pci_dev *dev)
+-{
+-	struct list_head *l;
+-	struct pci_driver *d;
+-
+-	list_for_each(l, &ide_pci_drivers) {
+-		d = list_entry(l, struct pci_driver, node);
+-		if (d->id_table) {
+-			const struct pci_device_id *id =
+-				pci_match_id(d->id_table, dev);
+-
+-			if (id != NULL && d->probe(dev, id) >= 0) {
+-				dev->driver = d;
+-				pci_dev_get(dev);
+-				return 1;
+-			}
+-		}
+-	}
+-	return 0;
+-}
+-
+-/**
+- *	ide_scan_pcibus		-	perform the initial IDE driver scan
+- *	@scan_direction: set for reverse order scanning
+- *
+- *	Perform the initial bus rather than driver ordered scan of the
+- *	PCI drivers. After this all IDE pci handling becomes standard
+- *	module ordering not traditionally ordered.
+- */
+- 	
+-void __init ide_scan_pcibus (int scan_direction)
+-{
+-	struct pci_dev *dev = NULL;
+-	struct pci_driver *d;
+-	struct list_head *l, *n;
+-
+-	pre_init = 0;
+-	if (!scan_direction)
+-		while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)))
+-			ide_scan_pcidev(dev);
+-	else
+-		while ((dev = pci_get_device_reverse(PCI_ANY_ID, PCI_ANY_ID,
+-						     dev)))
+-			ide_scan_pcidev(dev);
+-
+-	/*
+-	 *	Hand the drivers over to the PCI layer now we
+-	 *	are post init.
+-	 */
+-
+-	list_for_each_safe(l, n, &ide_pci_drivers) {
+-		list_del(l);
+-		d = list_entry(l, struct pci_driver, node);
+-		if (__pci_register_driver(d, d->driver.owner,
+-					  d->driver.mod_name))
+-			printk(KERN_ERR "%s: failed to register %s driver\n",
+-					__FUNCTION__, d->driver.mod_name);
+-	}
+-}
+-#endif
+diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
+index 90dc75b..511e432 100644
+--- a/drivers/ieee1394/nodemgr.c
++++ b/drivers/ieee1394/nodemgr.c
+@@ -727,33 +727,31 @@ static int nodemgr_bus_match(struct device * dev, struct device_driver * drv)
+ 
+ static DEFINE_MUTEX(nodemgr_serialize_remove_uds);
+ 
++static int __match_ne(struct device *dev, void *data)
 +{
 +	struct unit_directory *ud;
 +	struct node_entry *ne = (struct node_entry *)data;
@@ -67348,6 +74077,20 @@
 -	class_device_create(adb_dev_class, NULL, MKDEV(ADB_MAJOR, 0), NULL, "adb");
 +	device_create(adb_dev_class, NULL, MKDEV(ADB_MAJOR, 0), "adb");
  }
+diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
+index 48d647a..eaba4a9 100644
+--- a/drivers/macintosh/mediabay.c
++++ b/drivers/macintosh/mediabay.c
+@@ -563,7 +563,8 @@ static void media_bay_step(int i)
+ 				ide_init_hwif_ports(&hw, (unsigned long) bay->cd_base, (unsigned long) 0, NULL);
+ 				hw.irq = bay->cd_irq;
+ 				hw.chipset = ide_pmac;
+-				bay->cd_index = ide_register_hw(&hw, NULL, 0, NULL);
++				bay->cd_index =
++					ide_register_hw(&hw, NULL, NULL);
+ 				pmu_resume();
+ 			}
+ 			if (bay->cd_index == -1) {
 diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
 index 6123c70..ac420b1 100644
 --- a/drivers/macintosh/via-pmu.c
@@ -123664,361 +130407,12452 @@
 +	spin_unlock(&device_klist->k_lock);
  }
  
- static void __init pci_insertion_sort_devices(struct pci_dev *a, struct list_head *list)
-diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
-index 5cf89a9..15c18f5 100644
---- a/drivers/pcmcia/ds.c
-+++ b/drivers/pcmcia/ds.c
-@@ -312,8 +312,7 @@ pcmcia_create_newid_file(struct pcmcia_driver *drv)
+ static void __init pci_insertion_sort_devices(struct pci_dev *a, struct list_head *list)
+diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
+index 5cf89a9..15c18f5 100644
+--- a/drivers/pcmcia/ds.c
++++ b/drivers/pcmcia/ds.c
+@@ -312,8 +312,7 @@ pcmcia_create_newid_file(struct pcmcia_driver *drv)
+ {
+ 	int error = 0;
+ 	if (drv->probe != NULL)
+-		error = sysfs_create_file(&drv->drv.kobj,
+-					  &driver_attr_new_id.attr);
++		error = driver_create_file(&drv->drv, &driver_attr_new_id);
+ 	return error;
+ }
+ 
+diff --git a/drivers/power/apm_power.c b/drivers/power/apm_power.c
+index bbf3ee1..7e29b90 100644
+--- a/drivers/power/apm_power.c
++++ b/drivers/power/apm_power.c
+@@ -13,6 +13,7 @@
+ #include <linux/power_supply.h>
+ #include <linux/apm-emulation.h>
+ 
++static DEFINE_MUTEX(apm_mutex);
+ #define PSY_PROP(psy, prop, val) psy->get_property(psy, \
+ 			 POWER_SUPPLY_PROP_##prop, val)
+ 
+@@ -23,67 +24,86 @@
+ 
+ static struct power_supply *main_battery;
+ 
+-static void find_main_battery(void)
+-{
+-	struct device *dev;
+-	struct power_supply *bat = NULL;
+-	struct power_supply *max_charge_bat = NULL;
+-	struct power_supply *max_energy_bat = NULL;
++struct find_bat_param {
++	struct power_supply *main;
++	struct power_supply *bat;
++	struct power_supply *max_charge_bat;
++	struct power_supply *max_energy_bat;
+ 	union power_supply_propval full;
+-	int max_charge = 0;
+-	int max_energy = 0;
++	int max_charge;
++	int max_energy;
++};
+ 
+-	main_battery = NULL;
++static int __find_main_battery(struct device *dev, void *data)
++{
++	struct find_bat_param *bp = (struct find_bat_param *)data;
+ 
+-	list_for_each_entry(dev, &power_supply_class->devices, node) {
+-		bat = dev_get_drvdata(dev);
++	bp->bat = dev_get_drvdata(dev);
+ 
+-		if (bat->use_for_apm) {
+-			/* nice, we explicitly asked to report this battery. */
+-			main_battery = bat;
+-			return;
+-		}
++	if (bp->bat->use_for_apm) {
++		/* nice, we explicitly asked to report this battery. */
++		bp->main = bp->bat;
++		return 1;
++	}
+ 
+-		if (!PSY_PROP(bat, CHARGE_FULL_DESIGN, &full) ||
+-				!PSY_PROP(bat, CHARGE_FULL, &full)) {
+-			if (full.intval > max_charge) {
+-				max_charge_bat = bat;
+-				max_charge = full.intval;
+-			}
+-		} else if (!PSY_PROP(bat, ENERGY_FULL_DESIGN, &full) ||
+-				!PSY_PROP(bat, ENERGY_FULL, &full)) {
+-			if (full.intval > max_energy) {
+-				max_energy_bat = bat;
+-				max_energy = full.intval;
+-			}
++	if (!PSY_PROP(bp->bat, CHARGE_FULL_DESIGN, &bp->full) ||
++			!PSY_PROP(bp->bat, CHARGE_FULL, &bp->full)) {
++		if (bp->full.intval > bp->max_charge) {
++			bp->max_charge_bat = bp->bat;
++			bp->max_charge = bp->full.intval;
++		}
++	} else if (!PSY_PROP(bp->bat, ENERGY_FULL_DESIGN, &bp->full) ||
++			!PSY_PROP(bp->bat, ENERGY_FULL, &bp->full)) {
++		if (bp->full.intval > bp->max_energy) {
++			bp->max_energy_bat = bp->bat;
++			bp->max_energy = bp->full.intval;
+ 		}
+ 	}
++	return 0;
++}
++
++static void find_main_battery(void)
++{
++	struct find_bat_param bp;
++	int error;
++
++	memset(&bp, 0, sizeof(struct find_bat_param));
++	main_battery = NULL;
++	bp.main = main_battery;
++
++	error = class_for_each_device(power_supply_class, &bp,
++				      __find_main_battery);
++	if (error) {
++		main_battery = bp.main;
++		return;
++	}
+ 
+-	if ((max_energy_bat && max_charge_bat) &&
+-			(max_energy_bat != max_charge_bat)) {
++	if ((bp.max_energy_bat && bp.max_charge_bat) &&
++			(bp.max_energy_bat != bp.max_charge_bat)) {
+ 		/* try guess battery with more capacity */
+-		if (!PSY_PROP(max_charge_bat, VOLTAGE_MAX_DESIGN, &full)) {
+-			if (max_energy > max_charge * full.intval)
+-				main_battery = max_energy_bat;
++		if (!PSY_PROP(bp.max_charge_bat, VOLTAGE_MAX_DESIGN,
++			      &bp.full)) {
++			if (bp.max_energy > bp.max_charge * bp.full.intval)
++				main_battery = bp.max_energy_bat;
+ 			else
+-				main_battery = max_charge_bat;
+-		} else if (!PSY_PROP(max_energy_bat, VOLTAGE_MAX_DESIGN,
+-								  &full)) {
+-			if (max_charge > max_energy / full.intval)
+-				main_battery = max_charge_bat;
++				main_battery = bp.max_charge_bat;
++		} else if (!PSY_PROP(bp.max_energy_bat, VOLTAGE_MAX_DESIGN,
++								  &bp.full)) {
++			if (bp.max_charge > bp.max_energy / bp.full.intval)
++				main_battery = bp.max_charge_bat;
+ 			else
+-				main_battery = max_energy_bat;
++				main_battery = bp.max_energy_bat;
+ 		} else {
+ 			/* give up, choice any */
+-			main_battery = max_energy_bat;
++			main_battery = bp.max_energy_bat;
+ 		}
+-	} else if (max_charge_bat) {
+-		main_battery = max_charge_bat;
+-	} else if (max_energy_bat) {
+-		main_battery = max_energy_bat;
++	} else if (bp.max_charge_bat) {
++		main_battery = bp.max_charge_bat;
++	} else if (bp.max_energy_bat) {
++		main_battery = bp.max_energy_bat;
+ 	} else {
+ 		/* give up, try the last if any */
+-		main_battery = bat;
++		main_battery = bp.bat;
+ 	}
+ }
+ 
+@@ -207,10 +227,10 @@ static void apm_battery_apm_get_power_status(struct apm_power_info *info)
+ 	union power_supply_propval status;
+ 	union power_supply_propval capacity, time_to_full, time_to_empty;
+ 
+-	down(&power_supply_class->sem);
++	mutex_lock(&apm_mutex);
+ 	find_main_battery();
+ 	if (!main_battery) {
+-		up(&power_supply_class->sem);
++		mutex_unlock(&apm_mutex);
+ 		return;
+ 	}
+ 
+@@ -278,7 +298,7 @@ static void apm_battery_apm_get_power_status(struct apm_power_info *info)
+ 		}
+ 	}
+ 
+-	up(&power_supply_class->sem);
++	mutex_unlock(&apm_mutex);
+ }
+ 
+ static int __init apm_battery_init(void)
+diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
+index a63b75c..03d6a38 100644
+--- a/drivers/power/power_supply_core.c
++++ b/drivers/power/power_supply_core.c
+@@ -20,28 +20,29 @@
+ 
+ struct class *power_supply_class;
+ 
++static int __power_supply_changed_work(struct device *dev, void *data)
++{
++	struct power_supply *psy = (struct power_supply *)data;
++	struct power_supply *pst = dev_get_drvdata(dev);
++	int i;
++
++	for (i = 0; i < psy->num_supplicants; i++)
++		if (!strcmp(psy->supplied_to[i], pst->name)) {
++			if (pst->external_power_changed)
++				pst->external_power_changed(pst);
++		}
++	return 0;
++}
++
+ static void power_supply_changed_work(struct work_struct *work)
+ {
+ 	struct power_supply *psy = container_of(work, struct power_supply,
+ 						changed_work);
+-	int i;
+ 
+ 	dev_dbg(psy->dev, "%s\n", __FUNCTION__);
+ 
+-	for (i = 0; i < psy->num_supplicants; i++) {
+-		struct device *dev;
+-
+-		down(&power_supply_class->sem);
+-		list_for_each_entry(dev, &power_supply_class->devices, node) {
+-			struct power_supply *pst = dev_get_drvdata(dev);
+-
+-			if (!strcmp(psy->supplied_to[i], pst->name)) {
+-				if (pst->external_power_changed)
+-					pst->external_power_changed(pst);
+-			}
+-		}
+-		up(&power_supply_class->sem);
+-	}
++	class_for_each_device(power_supply_class, psy,
++			      __power_supply_changed_work);
+ 
+ 	power_supply_update_leds(psy);
+ 
+@@ -55,32 +56,35 @@ void power_supply_changed(struct power_supply *psy)
+ 	schedule_work(&psy->changed_work);
+ }
+ 
+-int power_supply_am_i_supplied(struct power_supply *psy)
++static int __power_supply_am_i_supplied(struct device *dev, void *data)
+ {
+ 	union power_supply_propval ret = {0,};
+-	struct device *dev;
+-
+-	down(&power_supply_class->sem);
+-	list_for_each_entry(dev, &power_supply_class->devices, node) {
+-		struct power_supply *epsy = dev_get_drvdata(dev);
+-		int i;
+-
+-		for (i = 0; i < epsy->num_supplicants; i++) {
+-			if (!strcmp(epsy->supplied_to[i], psy->name)) {
+-				if (epsy->get_property(epsy,
+-					  POWER_SUPPLY_PROP_ONLINE, &ret))
+-					continue;
+-				if (ret.intval)
+-					goto out;
+-			}
++	struct power_supply *psy = (struct power_supply *)data;
++	struct power_supply *epsy = dev_get_drvdata(dev);
++	int i;
++
++	for (i = 0; i < epsy->num_supplicants; i++) {
++		if (!strcmp(epsy->supplied_to[i], psy->name)) {
++			if (epsy->get_property(epsy,
++				  POWER_SUPPLY_PROP_ONLINE, &ret))
++				continue;
++			if (ret.intval)
++				return ret.intval;
+ 		}
+ 	}
+-out:
+-	up(&power_supply_class->sem);
++	return 0;
++}
++
++int power_supply_am_i_supplied(struct power_supply *psy)
++{
++	int error;
++
++	error = class_for_each_device(power_supply_class, psy,
++				      __power_supply_am_i_supplied);
+ 
+-	dev_dbg(psy->dev, "%s %d\n", __FUNCTION__, ret.intval);
++	dev_dbg(psy->dev, "%s %d\n", __FUNCTION__, error);
+ 
+-	return ret.intval;
++	return error;
+ }
+ 
+ int power_supply_register(struct device *parent, struct power_supply *psy)
+diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
+index f1e00ff..7e3ad4f 100644
+--- a/drivers/rtc/interface.c
++++ b/drivers/rtc/interface.c
+@@ -251,20 +251,23 @@ void rtc_update_irq(struct rtc_device *rtc,
+ }
+ EXPORT_SYMBOL_GPL(rtc_update_irq);
+ 
++static int __rtc_match(struct device *dev, void *data)
++{
++	char *name = (char *)data;
++
++	if (strncmp(dev->bus_id, name, BUS_ID_SIZE) == 0)
++		return 1;
++	return 0;
++}
++
+ struct rtc_device *rtc_class_open(char *name)
+ {
+ 	struct device *dev;
+ 	struct rtc_device *rtc = NULL;
+ 
+-	down(&rtc_class->sem);
+-	list_for_each_entry(dev, &rtc_class->devices, node) {
+-		if (strncmp(dev->bus_id, name, BUS_ID_SIZE) == 0) {
+-			dev = get_device(dev);
+-			if (dev)
+-				rtc = to_rtc_device(dev);
+-			break;
+-		}
+-	}
++	dev = class_find_device(rtc_class, name, __rtc_match);
++	if (dev)
++		rtc = to_rtc_device(dev);
+ 
+ 	if (rtc) {
+ 		if (!try_module_get(rtc->owner)) {
+@@ -272,7 +275,6 @@ struct rtc_device *rtc_class_open(char *name)
+ 			rtc = NULL;
+ 		}
+ 	}
+-	up(&rtc_class->sem);
+ 
+ 	return rtc;
+ }
+diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile
+index be9f22d..0a89e08 100644
+--- a/drivers/s390/block/Makefile
++++ b/drivers/s390/block/Makefile
+@@ -2,8 +2,8 @@
+ # S/390 block devices
+ #
+ 
+-dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_9343_erp.o
+-dasd_fba_mod-objs  := dasd_fba.o dasd_3370_erp.o dasd_9336_erp.o
++dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_alias.o
++dasd_fba_mod-objs  := dasd_fba.o
+ dasd_diag_mod-objs := dasd_diag.o
+ dasd_mod-objs      := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \
+ 			dasd_genhd.o dasd_erp.o
+diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
+index e6bfce6..1db15f3 100644
+--- a/drivers/s390/block/dasd.c
++++ b/drivers/s390/block/dasd.c
+@@ -48,13 +48,15 @@ MODULE_LICENSE("GPL");
+ /*
+  * SECTION: prototypes for static functions of dasd.c
+  */
+-static int  dasd_alloc_queue(struct dasd_device * device);
+-static void dasd_setup_queue(struct dasd_device * device);
+-static void dasd_free_queue(struct dasd_device * device);
+-static void dasd_flush_request_queue(struct dasd_device *);
+-static int dasd_flush_ccw_queue(struct dasd_device *, int);
+-static void dasd_tasklet(struct dasd_device *);
++static int  dasd_alloc_queue(struct dasd_block *);
++static void dasd_setup_queue(struct dasd_block *);
++static void dasd_free_queue(struct dasd_block *);
++static void dasd_flush_request_queue(struct dasd_block *);
++static int dasd_flush_block_queue(struct dasd_block *);
++static void dasd_device_tasklet(struct dasd_device *);
++static void dasd_block_tasklet(struct dasd_block *);
+ static void do_kick_device(struct work_struct *);
++static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
+ 
+ /*
+  * SECTION: Operations on the device structure.
+@@ -65,26 +67,23 @@ static wait_queue_head_t dasd_flush_wq;
+ /*
+  * Allocate memory for a new device structure.
+  */
+-struct dasd_device *
+-dasd_alloc_device(void)
++struct dasd_device *dasd_alloc_device(void)
+ {
+ 	struct dasd_device *device;
+ 
+-	device = kzalloc(sizeof (struct dasd_device), GFP_ATOMIC);
+-	if (device == NULL)
++	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
++	if (!device)
+ 		return ERR_PTR(-ENOMEM);
+-	/* open_count = 0 means device online but not in use */
+-	atomic_set(&device->open_count, -1);
+ 
+ 	/* Get two pages for normal block device operations. */
+ 	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
+-	if (device->ccw_mem == NULL) {
++	if (!device->ccw_mem) {
+ 		kfree(device);
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 	/* Get one page for error recovery. */
+ 	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
+-	if (device->erp_mem == NULL) {
++	if (!device->erp_mem) {
+ 		free_pages((unsigned long) device->ccw_mem, 1);
+ 		kfree(device);
+ 		return ERR_PTR(-ENOMEM);
+@@ -93,10 +92,9 @@ dasd_alloc_device(void)
+ 	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
+ 	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
+ 	spin_lock_init(&device->mem_lock);
+-	spin_lock_init(&device->request_queue_lock);
+-	atomic_set (&device->tasklet_scheduled, 0);
++	atomic_set(&device->tasklet_scheduled, 0);
+ 	tasklet_init(&device->tasklet,
+-		     (void (*)(unsigned long)) dasd_tasklet,
++		     (void (*)(unsigned long)) dasd_device_tasklet,
+ 		     (unsigned long) device);
+ 	INIT_LIST_HEAD(&device->ccw_queue);
+ 	init_timer(&device->timer);
+@@ -110,8 +108,7 @@ dasd_alloc_device(void)
+ /*
+  * Free memory of a device structure.
+  */
+-void
+-dasd_free_device(struct dasd_device *device)
++void dasd_free_device(struct dasd_device *device)
+ {
+ 	kfree(device->private);
+ 	free_page((unsigned long) device->erp_mem);
+@@ -120,10 +117,42 @@ dasd_free_device(struct dasd_device *device)
+ }
+ 
+ /*
++ * Allocate memory for a new device structure.
++ */
++struct dasd_block *dasd_alloc_block(void)
++{
++	struct dasd_block *block;
++
++	block = kzalloc(sizeof(*block), GFP_ATOMIC);
++	if (!block)
++		return ERR_PTR(-ENOMEM);
++	/* open_count = 0 means device online but not in use */
++	atomic_set(&block->open_count, -1);
++
++	spin_lock_init(&block->request_queue_lock);
++	atomic_set(&block->tasklet_scheduled, 0);
++	tasklet_init(&block->tasklet,
++		     (void (*)(unsigned long)) dasd_block_tasklet,
++		     (unsigned long) block);
++	INIT_LIST_HEAD(&block->ccw_queue);
++	spin_lock_init(&block->queue_lock);
++	init_timer(&block->timer);
++
++	return block;
++}
++
++/*
++ * Free memory of a device structure.
++ */
++void dasd_free_block(struct dasd_block *block)
++{
++	kfree(block);
++}
++
++/*
+  * Make a new device known to the system.
+  */
+-static int
+-dasd_state_new_to_known(struct dasd_device *device)
++static int dasd_state_new_to_known(struct dasd_device *device)
+ {
+ 	int rc;
+ 
+@@ -133,12 +162,13 @@ dasd_state_new_to_known(struct dasd_device *device)
+ 	 */
+ 	dasd_get_device(device);
+ 
+-	rc = dasd_alloc_queue(device);
+-	if (rc) {
+-		dasd_put_device(device);
+-		return rc;
++	if (device->block) {
++		rc = dasd_alloc_queue(device->block);
++		if (rc) {
++			dasd_put_device(device);
++			return rc;
++		}
+ 	}
+-
+ 	device->state = DASD_STATE_KNOWN;
+ 	return 0;
+ }
+@@ -146,21 +176,24 @@ dasd_state_new_to_known(struct dasd_device *device)
+ /*
+  * Let the system forget about a device.
+  */
+-static int
+-dasd_state_known_to_new(struct dasd_device * device)
++static int dasd_state_known_to_new(struct dasd_device *device)
+ {
+ 	/* Disable extended error reporting for this device. */
+ 	dasd_eer_disable(device);
+ 	/* Forget the discipline information. */
+-	if (device->discipline)
++	if (device->discipline) {
++		if (device->discipline->uncheck_device)
++			device->discipline->uncheck_device(device);
+ 		module_put(device->discipline->owner);
++	}
+ 	device->discipline = NULL;
+ 	if (device->base_discipline)
+ 		module_put(device->base_discipline->owner);
+ 	device->base_discipline = NULL;
+ 	device->state = DASD_STATE_NEW;
+ 
+-	dasd_free_queue(device);
++	if (device->block)
++		dasd_free_queue(device->block);
+ 
+ 	/* Give up reference we took in dasd_state_new_to_known. */
+ 	dasd_put_device(device);
+@@ -170,19 +203,19 @@ dasd_state_known_to_new(struct dasd_device * device)
+ /*
+  * Request the irq line for the device.
+  */
+-static int
+-dasd_state_known_to_basic(struct dasd_device * device)
++static int dasd_state_known_to_basic(struct dasd_device *device)
+ {
+ 	int rc;
+ 
+ 	/* Allocate and register gendisk structure. */
+-	rc = dasd_gendisk_alloc(device);
+-	if (rc)
+-		return rc;
+-
++	if (device->block) {
++		rc = dasd_gendisk_alloc(device->block);
++		if (rc)
++			return rc;
++	}
+ 	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
+-	device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2,
+-					    8 * sizeof (long));
++	device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 1,
++					    8 * sizeof(long));
+ 	debug_register_view(device->debug_area, &debug_sprintf_view);
+ 	debug_set_level(device->debug_area, DBF_WARNING);
+ 	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
+@@ -194,16 +227,17 @@ dasd_state_known_to_basic(struct dasd_device * device)
+ /*
+  * Release the irq line for the device. Terminate any running i/o.
+  */
+-static int
+-dasd_state_basic_to_known(struct dasd_device * device)
++static int dasd_state_basic_to_known(struct dasd_device *device)
+ {
+ 	int rc;
+-
+-	dasd_gendisk_free(device);
+-	rc = dasd_flush_ccw_queue(device, 1);
++	if (device->block) {
++		dasd_gendisk_free(device->block);
++		dasd_block_clear_timer(device->block);
++	}
++	rc = dasd_flush_device_queue(device);
+ 	if (rc)
+ 		return rc;
+-	dasd_clear_timer(device);
++	dasd_device_clear_timer(device);
+ 
+ 	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
+ 	if (device->debug_area != NULL) {
+@@ -228,26 +262,32 @@ dasd_state_basic_to_known(struct dasd_device * device)
+  * In case the analysis returns an error, the device setup is stopped
+  * (a fake disk was already added to allow formatting).
+  */
+-static int
+-dasd_state_basic_to_ready(struct dasd_device * device)
++static int dasd_state_basic_to_ready(struct dasd_device *device)
+ {
+ 	int rc;
++	struct dasd_block *block;
+ 
+ 	rc = 0;
+-	if (device->discipline->do_analysis != NULL)
+-		rc = device->discipline->do_analysis(device);
+-	if (rc) {
+-		if (rc != -EAGAIN)
+-			device->state = DASD_STATE_UNFMT;
+-		return rc;
+-	}
++	block = device->block;
+ 	/* make disk known with correct capacity */
+-	dasd_setup_queue(device);
+-	set_capacity(device->gdp, device->blocks << device->s2b_shift);
+-	device->state = DASD_STATE_READY;
+-	rc = dasd_scan_partitions(device);
+-	if (rc)
+-		device->state = DASD_STATE_BASIC;
++	if (block) {
++		if (block->base->discipline->do_analysis != NULL)
++			rc = block->base->discipline->do_analysis(block);
++		if (rc) {
++			if (rc != -EAGAIN)
++				device->state = DASD_STATE_UNFMT;
++			return rc;
++		}
++		dasd_setup_queue(block);
++		set_capacity(block->gdp,
++			     block->blocks << block->s2b_shift);
++		device->state = DASD_STATE_READY;
++		rc = dasd_scan_partitions(block);
++		if (rc)
++			device->state = DASD_STATE_BASIC;
++	} else {
++		device->state = DASD_STATE_READY;
++	}
+ 	return rc;
+ }
+ 
+@@ -256,28 +296,31 @@ dasd_state_basic_to_ready(struct dasd_device * device)
+  * Forget format information. Check if the target level is basic
+  * and if it is create fake disk for formatting.
+  */
+-static int
+-dasd_state_ready_to_basic(struct dasd_device * device)
++static int dasd_state_ready_to_basic(struct dasd_device *device)
+ {
+ 	int rc;
+ 
+-	rc = dasd_flush_ccw_queue(device, 0);
+-	if (rc)
+-		return rc;
+-	dasd_destroy_partitions(device);
+-	dasd_flush_request_queue(device);
+-	device->blocks = 0;
+-	device->bp_block = 0;
+-	device->s2b_shift = 0;
+ 	device->state = DASD_STATE_BASIC;
++	if (device->block) {
++		struct dasd_block *block = device->block;
++		rc = dasd_flush_block_queue(block);
++		if (rc) {
++			device->state = DASD_STATE_READY;
++			return rc;
++		}
++		dasd_destroy_partitions(block);
++		dasd_flush_request_queue(block);
++		block->blocks = 0;
++		block->bp_block = 0;
++		block->s2b_shift = 0;
++	}
+ 	return 0;
+ }
+ 
+ /*
+  * Back to basic.
+  */
+-static int
+-dasd_state_unfmt_to_basic(struct dasd_device * device)
++static int dasd_state_unfmt_to_basic(struct dasd_device *device)
+ {
+ 	device->state = DASD_STATE_BASIC;
+ 	return 0;
+@@ -291,17 +334,31 @@ dasd_state_unfmt_to_basic(struct dasd_device * device)
+ static int
+ dasd_state_ready_to_online(struct dasd_device * device)
+ {
++	int rc;
++
++	if (device->discipline->ready_to_online) {
++		rc = device->discipline->ready_to_online(device);
++		if (rc)
++			return rc;
++	}
+ 	device->state = DASD_STATE_ONLINE;
+-	dasd_schedule_bh(device);
++	if (device->block)
++		dasd_schedule_block_bh(device->block);
+ 	return 0;
+ }
+ 
+ /*
+  * Stop the requeueing of requests again.
+  */
+-static int
+-dasd_state_online_to_ready(struct dasd_device * device)
++static int dasd_state_online_to_ready(struct dasd_device *device)
+ {
++	int rc;
++
++	if (device->discipline->online_to_ready) {
++		rc = device->discipline->online_to_ready(device);
++		if (rc)
++			return rc;
++	}
+ 	device->state = DASD_STATE_READY;
+ 	return 0;
+ }
+@@ -309,8 +366,7 @@ dasd_state_online_to_ready(struct dasd_device * device)
+ /*
+  * Device startup state changes.
+  */
+-static int
+-dasd_increase_state(struct dasd_device *device)
++static int dasd_increase_state(struct dasd_device *device)
+ {
+ 	int rc;
+ 
+@@ -345,8 +401,7 @@ dasd_increase_state(struct dasd_device *device)
+ /*
+  * Device shutdown state changes.
+  */
+-static int
+-dasd_decrease_state(struct dasd_device *device)
++static int dasd_decrease_state(struct dasd_device *device)
+ {
+ 	int rc;
+ 
+@@ -381,8 +436,7 @@ dasd_decrease_state(struct dasd_device *device)
+ /*
+  * This is the main startup/shutdown routine.
+  */
+-static void
+-dasd_change_state(struct dasd_device *device)
++static void dasd_change_state(struct dasd_device *device)
+ {
+         int rc;
+ 
+@@ -409,17 +463,15 @@ dasd_change_state(struct dasd_device *device)
+  * dasd_kick_device will schedule a call do do_kick_device to the kernel
+  * event daemon.
+  */
+-static void
+-do_kick_device(struct work_struct *work)
++static void do_kick_device(struct work_struct *work)
+ {
+ 	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
+ 	dasd_change_state(device);
+-	dasd_schedule_bh(device);
++	dasd_schedule_device_bh(device);
+ 	dasd_put_device(device);
+ }
+ 
+-void
+-dasd_kick_device(struct dasd_device *device)
++void dasd_kick_device(struct dasd_device *device)
+ {
+ 	dasd_get_device(device);
+ 	/* queue call to dasd_kick_device to the kernel event daemon. */
+@@ -429,8 +481,7 @@ dasd_kick_device(struct dasd_device *device)
+ /*
+  * Set the target state for a device and starts the state change.
+  */
+-void
+-dasd_set_target_state(struct dasd_device *device, int target)
++void dasd_set_target_state(struct dasd_device *device, int target)
+ {
+ 	/* If we are in probeonly mode stop at DASD_STATE_READY. */
+ 	if (dasd_probeonly && target > DASD_STATE_READY)
+@@ -447,14 +498,12 @@ dasd_set_target_state(struct dasd_device *device, int target)
+ /*
+  * Enable devices with device numbers in [from..to].
+  */
+-static inline int
+-_wait_for_device(struct dasd_device *device)
++static inline int _wait_for_device(struct dasd_device *device)
+ {
+ 	return (device->state == device->target);
+ }
+ 
+-void
+-dasd_enable_device(struct dasd_device *device)
++void dasd_enable_device(struct dasd_device *device)
+ {
+ 	dasd_set_target_state(device, DASD_STATE_ONLINE);
+ 	if (device->state <= DASD_STATE_KNOWN)
+@@ -475,20 +524,20 @@ unsigned int dasd_profile_level = DASD_PROFILE_OFF;
+ /*
+  * Increments counter in global and local profiling structures.
+  */
+-#define dasd_profile_counter(value, counter, device) \
++#define dasd_profile_counter(value, counter, block) \
+ { \
+ 	int index; \
+ 	for (index = 0; index < 31 && value >> (2+index); index++); \
+ 	dasd_global_profile.counter[index]++; \
+-	device->profile.counter[index]++; \
++	block->profile.counter[index]++; \
+ }
+ 
+ /*
+  * Add profiling information for cqr before execution.
+  */
+-static void
+-dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
+-		   struct request *req)
++static void dasd_profile_start(struct dasd_block *block,
++			       struct dasd_ccw_req *cqr,
++			       struct request *req)
+ {
+ 	struct list_head *l;
+ 	unsigned int counter;
+@@ -498,19 +547,19 @@ dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
+ 
+ 	/* count the length of the chanq for statistics */
+ 	counter = 0;
+-	list_for_each(l, &device->ccw_queue)
++	list_for_each(l, &block->ccw_queue)
+ 		if (++counter >= 31)
+ 			break;
+ 	dasd_global_profile.dasd_io_nr_req[counter]++;
+-	device->profile.dasd_io_nr_req[counter]++;
++	block->profile.dasd_io_nr_req[counter]++;
+ }
+ 
+ /*
+  * Add profiling information for cqr after execution.
+  */
+-static void
+-dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
+-		 struct request *req)
++static void dasd_profile_end(struct dasd_block *block,
++			     struct dasd_ccw_req *cqr,
++			     struct request *req)
+ {
+ 	long strtime, irqtime, endtime, tottime;	/* in microseconds */
+ 	long tottimeps, sectors;
+@@ -532,27 +581,27 @@ dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
+ 
+ 	if (!dasd_global_profile.dasd_io_reqs)
+ 		memset(&dasd_global_profile, 0,
+-		       sizeof (struct dasd_profile_info_t));
++		       sizeof(struct dasd_profile_info_t));
+ 	dasd_global_profile.dasd_io_reqs++;
+ 	dasd_global_profile.dasd_io_sects += sectors;
+ 
+-	if (!device->profile.dasd_io_reqs)
+-		memset(&device->profile, 0,
+-		       sizeof (struct dasd_profile_info_t));
+-	device->profile.dasd_io_reqs++;
+-	device->profile.dasd_io_sects += sectors;
++	if (!block->profile.dasd_io_reqs)
++		memset(&block->profile, 0,
++		       sizeof(struct dasd_profile_info_t));
++	block->profile.dasd_io_reqs++;
++	block->profile.dasd_io_sects += sectors;
+ 
+-	dasd_profile_counter(sectors, dasd_io_secs, device);
+-	dasd_profile_counter(tottime, dasd_io_times, device);
+-	dasd_profile_counter(tottimeps, dasd_io_timps, device);
+-	dasd_profile_counter(strtime, dasd_io_time1, device);
+-	dasd_profile_counter(irqtime, dasd_io_time2, device);
+-	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, device);
+-	dasd_profile_counter(endtime, dasd_io_time3, device);
++	dasd_profile_counter(sectors, dasd_io_secs, block);
++	dasd_profile_counter(tottime, dasd_io_times, block);
++	dasd_profile_counter(tottimeps, dasd_io_timps, block);
++	dasd_profile_counter(strtime, dasd_io_time1, block);
++	dasd_profile_counter(irqtime, dasd_io_time2, block);
++	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block);
++	dasd_profile_counter(endtime, dasd_io_time3, block);
+ }
+ #else
+-#define dasd_profile_start(device, cqr, req) do {} while (0)
+-#define dasd_profile_end(device, cqr, req) do {} while (0)
++#define dasd_profile_start(block, cqr, req) do {} while (0)
++#define dasd_profile_end(block, cqr, req) do {} while (0)
+ #endif				/* CONFIG_DASD_PROFILE */
+ 
+ /*
+@@ -562,9 +611,9 @@ dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
+  * memory and 2) dasd_smalloc_request uses the static ccw memory
+  * that gets allocated for each device.
+  */
+-struct dasd_ccw_req *
+-dasd_kmalloc_request(char *magic, int cplength, int datasize,
+-		   struct dasd_device * device)
++struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength,
++					  int datasize,
++					  struct dasd_device *device)
+ {
+ 	struct dasd_ccw_req *cqr;
+ 
+@@ -600,9 +649,9 @@ dasd_kmalloc_request(char *magic, int cplength, int datasize,
+ 	return cqr;
+ }
+ 
+-struct dasd_ccw_req *
+-dasd_smalloc_request(char *magic, int cplength, int datasize,
+-		   struct dasd_device * device)
++struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength,
++					  int datasize,
++					  struct dasd_device *device)
+ {
+ 	unsigned long flags;
+ 	struct dasd_ccw_req *cqr;
+@@ -649,8 +698,7 @@ dasd_smalloc_request(char *magic, int cplength, int datasize,
+  * idal lists that might have been created by dasd_set_cda and the
+  * struct dasd_ccw_req itself.
+  */
+-void
+-dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
++void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
+ {
+ #ifdef CONFIG_64BIT
+ 	struct ccw1 *ccw;
+@@ -667,8 +715,7 @@ dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
+ 	dasd_put_device(device);
+ }
+ 
+-void
+-dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
++void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
+ {
+ 	unsigned long flags;
+ 
+@@ -681,14 +728,13 @@ dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
+ /*
+  * Check discipline magic in cqr.
+  */
+-static inline int
+-dasd_check_cqr(struct dasd_ccw_req *cqr)
++static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
+ {
+ 	struct dasd_device *device;
+ 
+ 	if (cqr == NULL)
+ 		return -EINVAL;
+-	device = cqr->device;
++	device = cqr->startdev;
+ 	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
+ 		DEV_MESSAGE(KERN_WARNING, device,
+ 			    " dasd_ccw_req 0x%08x magic doesn't match"
+@@ -706,8 +752,7 @@ dasd_check_cqr(struct dasd_ccw_req *cqr)
+  * ccw_device_clear can fail if the i/o subsystem
+  * is in a bad mood.
+  */
+-int
+-dasd_term_IO(struct dasd_ccw_req * cqr)
++int dasd_term_IO(struct dasd_ccw_req *cqr)
+ {
+ 	struct dasd_device *device;
+ 	int retries, rc;
+@@ -717,13 +762,13 @@ dasd_term_IO(struct dasd_ccw_req * cqr)
+ 	if (rc)
+ 		return rc;
+ 	retries = 0;
+-	device = (struct dasd_device *) cqr->device;
++	device = (struct dasd_device *) cqr->startdev;
+ 	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
+ 		rc = ccw_device_clear(device->cdev, (long) cqr);
+ 		switch (rc) {
+ 		case 0:	/* termination successful */
+ 			cqr->retries--;
+-			cqr->status = DASD_CQR_CLEAR;
++			cqr->status = DASD_CQR_CLEAR_PENDING;
+ 			cqr->stopclk = get_clock();
+ 			cqr->starttime = 0;
+ 			DBF_DEV_EVENT(DBF_DEBUG, device,
+@@ -753,7 +798,7 @@ dasd_term_IO(struct dasd_ccw_req * cqr)
+ 		}
+ 		retries++;
+ 	}
+-	dasd_schedule_bh(device);
++	dasd_schedule_device_bh(device);
+ 	return rc;
+ }
+ 
+@@ -761,8 +806,7 @@ dasd_term_IO(struct dasd_ccw_req * cqr)
+  * Start the i/o. This start_IO can fail if the channel is really busy.
+  * In that case set up a timer to start the request later.
+  */
+-int
+-dasd_start_IO(struct dasd_ccw_req * cqr)
++int dasd_start_IO(struct dasd_ccw_req *cqr)
+ {
+ 	struct dasd_device *device;
+ 	int rc;
+@@ -771,12 +815,12 @@ dasd_start_IO(struct dasd_ccw_req * cqr)
+ 	rc = dasd_check_cqr(cqr);
+ 	if (rc)
+ 		return rc;
+-	device = (struct dasd_device *) cqr->device;
++	device = (struct dasd_device *) cqr->startdev;
+ 	if (cqr->retries < 0) {
+ 		DEV_MESSAGE(KERN_DEBUG, device,
+ 			    "start_IO: request %p (%02x/%i) - no retry left.",
+ 			    cqr, cqr->status, cqr->retries);
+-		cqr->status = DASD_CQR_FAILED;
++		cqr->status = DASD_CQR_ERROR;
+ 		return -EIO;
+ 	}
+ 	cqr->startclk = get_clock();
+@@ -833,8 +877,7 @@ dasd_start_IO(struct dasd_ccw_req * cqr)
+  * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
+  * DASD_CQR_QUEUED for 2) and 3).
+  */
+-static void
+-dasd_timeout_device(unsigned long ptr)
++static void dasd_device_timeout(unsigned long ptr)
+ {
+ 	unsigned long flags;
+ 	struct dasd_device *device;
+@@ -844,14 +887,13 @@ dasd_timeout_device(unsigned long ptr)
+ 	/* re-activate request queue */
+         device->stopped &= ~DASD_STOPPED_PENDING;
+ 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+-	dasd_schedule_bh(device);
++	dasd_schedule_device_bh(device);
+ }
+ 
+ /*
+  * Setup timeout for a device in jiffies.
+  */
+-void
+-dasd_set_timer(struct dasd_device *device, int expires)
++void dasd_device_set_timer(struct dasd_device *device, int expires)
+ {
+ 	if (expires == 0) {
+ 		if (timer_pending(&device->timer))
+@@ -862,7 +904,7 @@ dasd_set_timer(struct dasd_device *device, int expires)
+ 		if (mod_timer(&device->timer, jiffies + expires))
+ 			return;
+ 	}
+-	device->timer.function = dasd_timeout_device;
++	device->timer.function = dasd_device_timeout;
+ 	device->timer.data = (unsigned long) device;
+ 	device->timer.expires = jiffies + expires;
+ 	add_timer(&device->timer);
+@@ -871,15 +913,14 @@ dasd_set_timer(struct dasd_device *device, int expires)
+ /*
+  * Clear timeout for a device.
+  */
+-void
+-dasd_clear_timer(struct dasd_device *device)
++void dasd_device_clear_timer(struct dasd_device *device)
+ {
+ 	if (timer_pending(&device->timer))
+ 		del_timer(&device->timer);
+ }
+ 
+-static void
+-dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
++static void dasd_handle_killed_request(struct ccw_device *cdev,
++				       unsigned long intparm)
+ {
+ 	struct dasd_ccw_req *cqr;
+ 	struct dasd_device *device;
+@@ -893,7 +934,7 @@ dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
+ 		return;
+ 	}
+ 
+-	device = (struct dasd_device *) cqr->device;
++	device = (struct dasd_device *) cqr->startdev;
+ 	if (device == NULL ||
+ 	    device != dasd_device_from_cdev_locked(cdev) ||
+ 	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
+@@ -905,46 +946,32 @@ dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
+ 	/* Schedule request to be retried. */
+ 	cqr->status = DASD_CQR_QUEUED;
+ 
+-	dasd_clear_timer(device);
+-	dasd_schedule_bh(device);
++	dasd_device_clear_timer(device);
++	dasd_schedule_device_bh(device);
+ 	dasd_put_device(device);
+ }
+ 
+-static void
+-dasd_handle_state_change_pending(struct dasd_device *device)
++void dasd_generic_handle_state_change(struct dasd_device *device)
+ {
+-	struct dasd_ccw_req *cqr;
+-	struct list_head *l, *n;
+-
+ 	/* First of all start sense subsystem status request. */
+ 	dasd_eer_snss(device);
+ 
+ 	device->stopped &= ~DASD_STOPPED_PENDING;
+-
+-        /* restart all 'running' IO on queue */
+-	list_for_each_safe(l, n, &device->ccw_queue) {
+-		cqr = list_entry(l, struct dasd_ccw_req, list);
+-                if (cqr->status == DASD_CQR_IN_IO) {
+-                        cqr->status = DASD_CQR_QUEUED;
+-		}
+-        }
+-	dasd_clear_timer(device);
+-	dasd_schedule_bh(device);
++	dasd_schedule_device_bh(device);
++	if (device->block)
++		dasd_schedule_block_bh(device->block);
+ }
+ 
+ /*
+  * Interrupt handler for "normal" ssch-io based dasd devices.
+  */
+-void
+-dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
+-		 struct irb *irb)
++void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
++		      struct irb *irb)
+ {
+ 	struct dasd_ccw_req *cqr, *next;
+ 	struct dasd_device *device;
+ 	unsigned long long now;
+ 	int expires;
+-	dasd_era_t era;
+-	char mask;
+ 
+ 	if (IS_ERR(irb)) {
+ 		switch (PTR_ERR(irb)) {
+@@ -969,29 +996,25 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ 		  cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat),
+ 		  (unsigned int) intparm);
+ 
+-	/* first of all check for state change pending interrupt */
+-	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
+-	if ((irb->scsw.dstat & mask) == mask) {
++	/* check for unsolicited interrupts */
++	cqr = (struct dasd_ccw_req *) intparm;
++	if (!cqr || ((irb->scsw.cc == 1) &&
++		     (irb->scsw.fctl & SCSW_FCTL_START_FUNC) &&
++		     (irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) ) {
++		if (cqr && cqr->status == DASD_CQR_IN_IO)
++			cqr->status = DASD_CQR_QUEUED;
+ 		device = dasd_device_from_cdev_locked(cdev);
+ 		if (!IS_ERR(device)) {
+-			dasd_handle_state_change_pending(device);
++			dasd_device_clear_timer(device);
++			device->discipline->handle_unsolicited_interrupt(device,
++									 irb);
+ 			dasd_put_device(device);
+ 		}
+ 		return;
+ 	}
+ 
+-	cqr = (struct dasd_ccw_req *) intparm;
+-
+-	/* check for unsolicited interrupts */
+-	if (cqr == NULL) {
+-		MESSAGE(KERN_DEBUG,
+-			"unsolicited interrupt received: bus_id %s",
+-			cdev->dev.bus_id);
+-		return;
+-	}
+-
+-	device = (struct dasd_device *) cqr->device;
+-	if (device == NULL ||
++	device = (struct dasd_device *) cqr->startdev;
++	if (!device ||
+ 	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
+ 		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
+ 			cdev->dev.bus_id);
+@@ -999,12 +1022,12 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ 	}
+ 
+ 	/* Check for clear pending */
+-	if (cqr->status == DASD_CQR_CLEAR &&
++	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
+ 	    irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
+-		cqr->status = DASD_CQR_QUEUED;
+-		dasd_clear_timer(device);
++		cqr->status = DASD_CQR_CLEARED;
++		dasd_device_clear_timer(device);
+ 		wake_up(&dasd_flush_wq);
+-		dasd_schedule_bh(device);
++		dasd_schedule_device_bh(device);
+ 		return;
+ 	}
+ 
+@@ -1017,277 +1040,170 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ 	}
+ 	DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
+ 		      ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);
+-
+- 	/* Find out the appropriate era_action. */
+-	if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC)
+-		era = dasd_era_fatal;
+-	else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
+-		 irb->scsw.cstat == 0 &&
+-		 !irb->esw.esw0.erw.cons)
+-		era = dasd_era_none;
+-	else if (irb->esw.esw0.erw.cons)
+-		era = device->discipline->examine_error(cqr, irb);
+-	else
+-		era = dasd_era_recover;
+-
+-	DBF_DEV_EVENT(DBF_DEBUG, device, "era_code %d", era);
++	next = NULL;
+ 	expires = 0;
+-	if (era == dasd_era_none) {
+-		cqr->status = DASD_CQR_DONE;
++	if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
++	    irb->scsw.cstat == 0 && !irb->esw.esw0.erw.cons) {
++		/* request was completed successfully */
++		cqr->status = DASD_CQR_SUCCESS;
+ 		cqr->stopclk = now;
+ 		/* Start first request on queue if possible -> fast_io. */
+-		if (cqr->list.next != &device->ccw_queue) {
+-			next = list_entry(cqr->list.next,
+-					  struct dasd_ccw_req, list);
+-			if ((next->status == DASD_CQR_QUEUED) &&
+-			    (!device->stopped)) {
+-				if (device->discipline->start_IO(next) == 0)
+-					expires = next->expires;
+-				else
+-					DEV_MESSAGE(KERN_DEBUG, device, "%s",
+-						    "Interrupt fastpath "
+-						    "failed!");
+-			}
++		if (cqr->devlist.next != &device->ccw_queue) {
++			next = list_entry(cqr->devlist.next,
++					  struct dasd_ccw_req, devlist);
+ 		}
+-	} else {		/* error */
+-		memcpy(&cqr->irb, irb, sizeof (struct irb));
++	} else {  /* error */
++		memcpy(&cqr->irb, irb, sizeof(struct irb));
+ 		if (device->features & DASD_FEATURE_ERPLOG) {
+-			/* dump sense data */
+ 			dasd_log_sense(cqr, irb);
+ 		}
+-		switch (era) {
+-		case dasd_era_fatal:
+-			cqr->status = DASD_CQR_FAILED;
+-			cqr->stopclk = now;
+-			break;
+-		case dasd_era_recover:
++		/* If we have no sense data, or we just don't want complex ERP
++		 * for this request, but if we have retries left, then just
++		 * reset this request and retry it in the fastpath
++		 */
++		if (!(cqr->irb.esw.esw0.erw.cons &&
++		      test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) &&
++		    cqr->retries > 0) {
++			DEV_MESSAGE(KERN_DEBUG, device,
++				    "default ERP in fastpath (%i retries left)",
++				    cqr->retries);
++			cqr->lpm    = LPM_ANYPATH;
++			cqr->status = DASD_CQR_QUEUED;
++			next = cqr;
++		} else
+ 			cqr->status = DASD_CQR_ERROR;
+-			break;
+-		default:
+-			BUG();
+-		}
++	}
++	if (next && (next->status == DASD_CQR_QUEUED) &&
++	    (!device->stopped)) {
++		if (device->discipline->start_IO(next) == 0)
++			expires = next->expires;
++		else
++			DEV_MESSAGE(KERN_DEBUG, device, "%s",
++				    "Interrupt fastpath "
++				    "failed!");
+ 	}
+ 	if (expires != 0)
+-		dasd_set_timer(device, expires);
++		dasd_device_set_timer(device, expires);
+ 	else
+-		dasd_clear_timer(device);
+-	dasd_schedule_bh(device);
++		dasd_device_clear_timer(device);
++	dasd_schedule_device_bh(device);
+ }
+ 
+ /*
+- * posts the buffer_cache about a finalized request
++ * If we have an error on a dasd_block layer request then we cancel
++ * and return all further requests from the same dasd_block as well.
+  */
+-static inline void
+-dasd_end_request(struct request *req, int uptodate)
++static void __dasd_device_recovery(struct dasd_device *device,
++				   struct dasd_ccw_req *ref_cqr)
+ {
+-	if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
+-		BUG();
+-	add_disk_randomness(req->rq_disk);
+-	end_that_request_last(req, uptodate);
+-}
++	struct list_head *l, *n;
++	struct dasd_ccw_req *cqr;
+ 
+-/*
+- * Process finished error recovery ccw.
+- */
+-static inline void
+-__dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr)
+-{
+-	dasd_erp_fn_t erp_fn;
++	/*
++	 * only requeue request that came from the dasd_block layer
++	 */
++	if (!ref_cqr->block)
++		return;
+ 
+-	if (cqr->status == DASD_CQR_DONE)
+-		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
+-	else
+-		DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
+-	erp_fn = device->discipline->erp_postaction(cqr);
+-	erp_fn(cqr);
+-}
++	list_for_each_safe(l, n, &device->ccw_queue) {
++		cqr = list_entry(l, struct dasd_ccw_req, devlist);
++		if (cqr->status == DASD_CQR_QUEUED &&
++		    ref_cqr->block == cqr->block) {
++			cqr->status = DASD_CQR_CLEARED;
++		}
++	}
++};
+ 
+ /*
+- * Process ccw request queue.
++ * Remove those ccw requests from the queue that need to be returned
++ * to the upper layer.
+  */
+-static void
+-__dasd_process_ccw_queue(struct dasd_device * device,
+-			 struct list_head *final_queue)
++static void __dasd_device_process_ccw_queue(struct dasd_device *device,
++					    struct list_head *final_queue)
+ {
+ 	struct list_head *l, *n;
+ 	struct dasd_ccw_req *cqr;
+-	dasd_erp_fn_t erp_fn;
+ 
+-restart:
+ 	/* Process request with final status. */
+ 	list_for_each_safe(l, n, &device->ccw_queue) {
+-		cqr = list_entry(l, struct dasd_ccw_req, list);
++		cqr = list_entry(l, struct dasd_ccw_req, devlist);
++
+ 		/* Stop list processing at the first non-final request. */
+-		if (cqr->status != DASD_CQR_DONE &&
+-		    cqr->status != DASD_CQR_FAILED &&
+-		    cqr->status != DASD_CQR_ERROR)
++		if (cqr->status == DASD_CQR_QUEUED ||
++		    cqr->status == DASD_CQR_IN_IO ||
++		    cqr->status == DASD_CQR_CLEAR_PENDING)
+ 			break;
+-		/*  Process requests with DASD_CQR_ERROR */
+ 		if (cqr->status == DASD_CQR_ERROR) {
+-			if (cqr->irb.scsw.fctl & SCSW_FCTL_HALT_FUNC) {
+-				cqr->status = DASD_CQR_FAILED;
+-				cqr->stopclk = get_clock();
+-			} else {
+-				if (cqr->irb.esw.esw0.erw.cons &&
+-				    test_bit(DASD_CQR_FLAGS_USE_ERP,
+-					     &cqr->flags)) {
+-					erp_fn = device->discipline->
+-						erp_action(cqr);
+-					erp_fn(cqr);
+-				} else
+-					dasd_default_erp_action(cqr);
+-			}
+-			goto restart;
+-		}
+-
+-		/* First of all call extended error reporting. */
+-		if (dasd_eer_enabled(device) &&
+-		    cqr->status == DASD_CQR_FAILED) {
+-			dasd_eer_write(device, cqr, DASD_EER_FATALERROR);
+-
+-			/* restart request  */
+-			cqr->status = DASD_CQR_QUEUED;
+-			cqr->retries = 255;
+-			device->stopped |= DASD_STOPPED_QUIESCE;
+-			goto restart;
++			__dasd_device_recovery(device, cqr);
+ 		}
+-
+-		/* Process finished ERP request. */
+-		if (cqr->refers) {
+-			__dasd_process_erp(device, cqr);
+-			goto restart;
+-		}
+-
+ 		/* Rechain finished requests to final queue */
+-		cqr->endclk = get_clock();
+-		list_move_tail(&cqr->list, final_queue);
++		list_move_tail(&cqr->devlist, final_queue);
+ 	}
+ }
+ 
+-static void
+-dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
+-{
+-	struct request *req;
+-	struct dasd_device *device;
+-	int status;
+-
+-	req = (struct request *) data;
+-	device = cqr->device;
+-	dasd_profile_end(device, cqr, req);
+-	status = cqr->device->discipline->free_cp(cqr,req);
+-	spin_lock_irq(&device->request_queue_lock);
+-	dasd_end_request(req, status);
+-	spin_unlock_irq(&device->request_queue_lock);
+-}
+-
+-
+ /*
+- * Fetch requests from the block device queue.
++ * the cqrs from the final queue are returned to the upper layer
++ * by setting a dasd_block state and calling the callback function
+  */
+-static void
+-__dasd_process_blk_queue(struct dasd_device * device)
++static void __dasd_device_process_final_queue(struct dasd_device *device,
++					      struct list_head *final_queue)
+ {
+-	struct request_queue *queue;
+-	struct request *req;
++	struct list_head *l, *n;
+ 	struct dasd_ccw_req *cqr;
+-	int nr_queued;
+-
+-	queue = device->request_queue;
+-	/* No queue ? Then there is nothing to do. */
+-	if (queue == NULL)
+-		return;
+-
+-	/*
+-	 * We requeue request from the block device queue to the ccw
+-	 * queue only in two states. In state DASD_STATE_READY the
+-	 * partition detection is done and we need to requeue requests
+-	 * for that. State DASD_STATE_ONLINE is normal block device
+-	 * operation.
+-	 */
+-	if (device->state != DASD_STATE_READY &&
+-	    device->state != DASD_STATE_ONLINE)
+-		return;
+-	nr_queued = 0;
+-	/* Now we try to fetch requests from the request queue */
+-	list_for_each_entry(cqr, &device->ccw_queue, list)
+-		if (cqr->status == DASD_CQR_QUEUED)
+-			nr_queued++;
+-	while (!blk_queue_plugged(queue) &&
+-	       elv_next_request(queue) &&
+-		nr_queued < DASD_CHANQ_MAX_SIZE) {
+-		req = elv_next_request(queue);
+ 
+-		if (device->features & DASD_FEATURE_READONLY &&
+-		    rq_data_dir(req) == WRITE) {
+-			DBF_DEV_EVENT(DBF_ERR, device,
+-				      "Rejecting write request %p",
+-				      req);
+-			blkdev_dequeue_request(req);
+-			dasd_end_request(req, 0);
+-			continue;
+-		}
+-		if (device->stopped & DASD_STOPPED_DC_EIO) {
+-			blkdev_dequeue_request(req);
+-			dasd_end_request(req, 0);
+-			continue;
+-		}
+-		cqr = device->discipline->build_cp(device, req);
+-		if (IS_ERR(cqr)) {
+-			if (PTR_ERR(cqr) == -ENOMEM)
+-				break;	/* terminate request queue loop */
+-			if (PTR_ERR(cqr) == -EAGAIN) {
+-				/*
+-				 * The current request cannot be build right
+-				 * now, we have to try later. If this request
+-				 * is the head-of-queue we stop the device
+-				 * for 1/2 second.
+-				 */
+-				if (!list_empty(&device->ccw_queue))
+-					break;
+-				device->stopped |= DASD_STOPPED_PENDING;
+-				dasd_set_timer(device, HZ/2);
+-				break;
+-			}
+-			DBF_DEV_EVENT(DBF_ERR, device,
+-				      "CCW creation failed (rc=%ld) "
+-				      "on request %p",
+-				      PTR_ERR(cqr), req);
+-			blkdev_dequeue_request(req);
+-			dasd_end_request(req, 0);
+-			continue;
++	list_for_each_safe(l, n, final_queue) {
++		cqr = list_entry(l, struct dasd_ccw_req, devlist);
++		list_del_init(&cqr->devlist);
++		if (cqr->block)
++			spin_lock_bh(&cqr->block->queue_lock);
++		switch (cqr->status) {
++		case DASD_CQR_SUCCESS:
++			cqr->status = DASD_CQR_DONE;
++			break;
++		case DASD_CQR_ERROR:
++			cqr->status = DASD_CQR_NEED_ERP;
++			break;
++		case DASD_CQR_CLEARED:
++			cqr->status = DASD_CQR_TERMINATED;
++			break;
++		default:
++			DEV_MESSAGE(KERN_ERR, device,
++				    "wrong cqr status in __dasd_process_final_queue "
++				    "for cqr %p, status %x",
++				    cqr, cqr->status);
++			BUG();
+ 		}
+-		cqr->callback = dasd_end_request_cb;
+-		cqr->callback_data = (void *) req;
+-		cqr->status = DASD_CQR_QUEUED;
+-		blkdev_dequeue_request(req);
+-		list_add_tail(&cqr->list, &device->ccw_queue);
+-		dasd_profile_start(device, cqr, req);
+-		nr_queued++;
++		if (cqr->block)
++			spin_unlock_bh(&cqr->block->queue_lock);
++		if (cqr->callback != NULL)
++			(cqr->callback)(cqr, cqr->callback_data);
+ 	}
+ }
+ 
++
++
+ /*
+  * Take a look at the first request on the ccw queue and check
+  * if it reached its expire time. If so, terminate the IO.
+  */
+-static void
+-__dasd_check_expire(struct dasd_device * device)
++static void __dasd_device_check_expire(struct dasd_device *device)
+ {
+ 	struct dasd_ccw_req *cqr;
+ 
+ 	if (list_empty(&device->ccw_queue))
+ 		return;
+-	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
++	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
+ 	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
+ 	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
+ 		if (device->discipline->term_IO(cqr) != 0) {
+ 			/* Hmpf, try again in 5 sec */
+-			dasd_set_timer(device, 5*HZ);
+ 			DEV_MESSAGE(KERN_ERR, device,
+ 				    "internal error - timeout (%is) expired "
+ 				    "for cqr %p, termination failed, "
+ 				    "retrying in 5s",
+ 				    (cqr->expires/HZ), cqr);
++			cqr->expires += 5*HZ;
++			dasd_device_set_timer(device, 5*HZ);
+ 		} else {
+ 			DEV_MESSAGE(KERN_ERR, device,
+ 				    "internal error - timeout (%is) expired "
+@@ -1301,77 +1217,53 @@ __dasd_check_expire(struct dasd_device * device)
+  * Take a look at the first request on the ccw queue and check
+  * if it needs to be started.
+  */
+-static void
+-__dasd_start_head(struct dasd_device * device)
++static void __dasd_device_start_head(struct dasd_device *device)
+ {
+ 	struct dasd_ccw_req *cqr;
+ 	int rc;
+ 
+ 	if (list_empty(&device->ccw_queue))
+ 		return;
+-	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
++	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
+ 	if (cqr->status != DASD_CQR_QUEUED)
+ 		return;
+-	/* Non-temporary stop condition will trigger fail fast */
+-	if (device->stopped & ~DASD_STOPPED_PENDING &&
+-	    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
+-	    (!dasd_eer_enabled(device))) {
+-		cqr->status = DASD_CQR_FAILED;
+-		dasd_schedule_bh(device);
++	/* when device is stopped, return request to previous layer */
++	if (device->stopped) {
++		cqr->status = DASD_CQR_CLEARED;
++		dasd_schedule_device_bh(device);
+ 		return;
+ 	}
+-	/* Don't try to start requests if device is stopped */
+-	if (device->stopped)
+-		return;
+ 
+ 	rc = device->discipline->start_IO(cqr);
+ 	if (rc == 0)
+-		dasd_set_timer(device, cqr->expires);
++		dasd_device_set_timer(device, cqr->expires);
+ 	else if (rc == -EACCES) {
+-		dasd_schedule_bh(device);
++		dasd_schedule_device_bh(device);
+ 	} else
+ 		/* Hmpf, try again in 1/2 sec */
+-		dasd_set_timer(device, 50);
+-}
+-
+-static inline int
+-_wait_for_clear(struct dasd_ccw_req *cqr)
+-{
+-	return (cqr->status == DASD_CQR_QUEUED);
++		dasd_device_set_timer(device, 50);
+ }
+ 
+ /*
+- * Remove all requests from the ccw queue (all = '1') or only block device
+- * requests in case all = '0'.
+- * Take care of the erp-chain (chained via cqr->refers) and remove either
+- * the whole erp-chain or none of the erp-requests.
+- * If a request is currently running, term_IO is called and the request
+- * is re-queued. Prior to removing the terminated request we need to wait
+- * for the clear-interrupt.
+- * In case termination is not possible we stop processing and just finishing
+- * the already moved requests.
++ * Go through all request on the dasd_device request queue,
++ * terminate them on the cdev if necessary, and return them to the
++ * submitting layer via callback.
++ * Note:
++ * Make sure that all 'submitting layers' still exist when
++ * this function is called!. In other words, when 'device' is a base
++ * device then all block layer requests must have been removed before
++ * via dasd_flush_block_queue.
+  */
+-static int
+-dasd_flush_ccw_queue(struct dasd_device * device, int all)
++int dasd_flush_device_queue(struct dasd_device *device)
+ {
+-	struct dasd_ccw_req *cqr, *orig, *n;
+-	int rc, i;
+-
++	struct dasd_ccw_req *cqr, *n;
++	int rc;
+ 	struct list_head flush_queue;
+ 
+ 	INIT_LIST_HEAD(&flush_queue);
+ 	spin_lock_irq(get_ccwdev_lock(device->cdev));
+ 	rc = 0;
+-restart:
+-	list_for_each_entry_safe(cqr, n, &device->ccw_queue, list) {
+-		/* get original request of erp request-chain */
+-		for (orig = cqr; orig->refers != NULL; orig = orig->refers);
+-
+-		/* Flush all request or only block device requests? */
+-		if (all == 0 && cqr->callback != dasd_end_request_cb &&
+-		    orig->callback != dasd_end_request_cb) {
+-			continue;
+-		}
++	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
+ 		/* Check status and move request to flush_queue */
+ 		switch (cqr->status) {
+ 		case DASD_CQR_IN_IO:
+@@ -1387,90 +1279,60 @@ restart:
+ 			}
+ 			break;
+ 		case DASD_CQR_QUEUED:
+-		case DASD_CQR_ERROR:
+-			/* set request to FAILED */
+ 			cqr->stopclk = get_clock();
+-			cqr->status = DASD_CQR_FAILED;
++			cqr->status = DASD_CQR_CLEARED;
+ 			break;
+-		default: /* do not touch the others */
++		default: /* no need to modify the others */
+ 			break;
+ 		}
+-		/* Rechain request (including erp chain) */
+-		for (i = 0; cqr != NULL; cqr = cqr->refers, i++) {
+-			cqr->endclk = get_clock();
+-			list_move_tail(&cqr->list, &flush_queue);
+-		}
+-		if (i > 1)
+-			/* moved more than one request - need to restart */
+-			goto restart;
++		list_move_tail(&cqr->devlist, &flush_queue);
+ 	}
+-
+ finished:
+ 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+-	/* Now call the callback function of flushed requests */
+-restart_cb:
+-	list_for_each_entry_safe(cqr, n, &flush_queue, list) {
+-		if (cqr->status == DASD_CQR_CLEAR) {
+-			/* wait for clear interrupt! */
+-			wait_event(dasd_flush_wq, _wait_for_clear(cqr));
+-			cqr->status = DASD_CQR_FAILED;
+-		}
+-		/* Process finished ERP request. */
+-		if (cqr->refers) {
+-			__dasd_process_erp(device, cqr);
+-			/* restart list_for_xx loop since dasd_process_erp
+-			 * might remove multiple elements */
+-			goto restart_cb;
+-		}
+-		/* call the callback function */
+-		cqr->endclk = get_clock();
+-		if (cqr->callback != NULL)
+-			(cqr->callback)(cqr, cqr->callback_data);
+-	}
++	/*
++	 * After this point all requests must be in state CLEAR_PENDING,
++	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
++	 * one of the others.
++	 */
++	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
++		wait_event(dasd_flush_wq,
++			   (cqr->status != DASD_CQR_CLEAR_PENDING));
++	/*
++	 * Now set each request back to TERMINATED, DONE or NEED_ERP
++	 * and call the callback function of flushed requests
++	 */
++	__dasd_device_process_final_queue(device, &flush_queue);
+ 	return rc;
+ }
+ 
+ /*
+  * Acquire the device lock and process queues for the device.
+  */
+-static void
+-dasd_tasklet(struct dasd_device * device)
++static void dasd_device_tasklet(struct dasd_device *device)
+ {
+ 	struct list_head final_queue;
+-	struct list_head *l, *n;
+-	struct dasd_ccw_req *cqr;
+ 
+ 	atomic_set (&device->tasklet_scheduled, 0);
+ 	INIT_LIST_HEAD(&final_queue);
+ 	spin_lock_irq(get_ccwdev_lock(device->cdev));
+ 	/* Check expire time of first request on the ccw queue. */
+-	__dasd_check_expire(device);
+-	/* Finish off requests on ccw queue */
+-	__dasd_process_ccw_queue(device, &final_queue);
++	__dasd_device_check_expire(device);
++	/* find final requests on ccw queue */
++	__dasd_device_process_ccw_queue(device, &final_queue);
+ 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+ 	/* Now call the callback function of requests with final status */
+-	list_for_each_safe(l, n, &final_queue) {
+-		cqr = list_entry(l, struct dasd_ccw_req, list);
+-		list_del_init(&cqr->list);
+-		if (cqr->callback != NULL)
+-			(cqr->callback)(cqr, cqr->callback_data);
+-	}
+-	spin_lock_irq(&device->request_queue_lock);
+-	spin_lock(get_ccwdev_lock(device->cdev));
+-	/* Get new request from the block device request queue */
+-	__dasd_process_blk_queue(device);
++	__dasd_device_process_final_queue(device, &final_queue);
++	spin_lock_irq(get_ccwdev_lock(device->cdev));
+ 	/* Now check if the head of the ccw queue needs to be started. */
+-	__dasd_start_head(device);
+-	spin_unlock(get_ccwdev_lock(device->cdev));
+-	spin_unlock_irq(&device->request_queue_lock);
++	__dasd_device_start_head(device);
++	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+ 	dasd_put_device(device);
+ }
+ 
+ /*
+  * Schedules a call to dasd_tasklet over the device tasklet.
+  */
+-void
+-dasd_schedule_bh(struct dasd_device * device)
++void dasd_schedule_device_bh(struct dasd_device *device)
+ {
+ 	/* Protect against rescheduling. */
+ 	if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
+@@ -1480,160 +1342,109 @@ dasd_schedule_bh(struct dasd_device * device)
+ }
+ 
+ /*
+- * Queue a request to the head of the ccw_queue. Start the I/O if
+- * possible.
++ * Queue a request to the head of the device ccw_queue.
++ * Start the I/O if possible.
+  */
+-void
+-dasd_add_request_head(struct dasd_ccw_req *req)
++void dasd_add_request_head(struct dasd_ccw_req *cqr)
+ {
+ 	struct dasd_device *device;
+ 	unsigned long flags;
+ 
+-	device = req->device;
++	device = cqr->startdev;
+ 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+-	req->status = DASD_CQR_QUEUED;
+-	req->device = device;
+-	list_add(&req->list, &device->ccw_queue);
++	cqr->status = DASD_CQR_QUEUED;
++	list_add(&cqr->devlist, &device->ccw_queue);
+ 	/* let the bh start the request to keep them in order */
+-	dasd_schedule_bh(device);
++	dasd_schedule_device_bh(device);
+ 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+ }
+ 
+ /*
+- * Queue a request to the tail of the ccw_queue. Start the I/O if
+- * possible.
++ * Queue a request to the tail of the device ccw_queue.
++ * Start the I/O if possible.
+  */
+-void
+-dasd_add_request_tail(struct dasd_ccw_req *req)
++void dasd_add_request_tail(struct dasd_ccw_req *cqr)
+ {
+ 	struct dasd_device *device;
+ 	unsigned long flags;
+ 
+-	device = req->device;
++	device = cqr->startdev;
+ 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+-	req->status = DASD_CQR_QUEUED;
+-	req->device = device;
+-	list_add_tail(&req->list, &device->ccw_queue);
++	cqr->status = DASD_CQR_QUEUED;
++	list_add_tail(&cqr->devlist, &device->ccw_queue);
+ 	/* let the bh start the request to keep them in order */
+-	dasd_schedule_bh(device);
++	dasd_schedule_device_bh(device);
+ 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+ }
+ 
+ /*
+- * Wakeup callback.
++ * Wakeup helper for the 'sleep_on' functions.
+  */
+-static void
+-dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
++static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
+ {
+ 	wake_up((wait_queue_head_t *) data);
+ }
+ 
+-static inline int
+-_wait_for_wakeup(struct dasd_ccw_req *cqr)
++static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
+ {
+ 	struct dasd_device *device;
+ 	int rc;
+ 
+-	device = cqr->device;
++	device = cqr->startdev;
+ 	spin_lock_irq(get_ccwdev_lock(device->cdev));
+ 	rc = ((cqr->status == DASD_CQR_DONE ||
+-	       cqr->status == DASD_CQR_FAILED) &&
+-	      list_empty(&cqr->list));
++	       cqr->status == DASD_CQR_NEED_ERP ||
++	       cqr->status == DASD_CQR_TERMINATED) &&
++	      list_empty(&cqr->devlist));
+ 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+ 	return rc;
+ }
+ 
+ /*
+- * Attempts to start a special ccw queue and waits for its completion.
++ * Queue a request to the tail of the device ccw_queue and wait for
++ * it's completion.
+  */
+-int
+-dasd_sleep_on(struct dasd_ccw_req * cqr)
++int dasd_sleep_on(struct dasd_ccw_req *cqr)
+ {
+ 	wait_queue_head_t wait_q;
+ 	struct dasd_device *device;
+ 	int rc;
+ 
+-	device = cqr->device;
+-	spin_lock_irq(get_ccwdev_lock(device->cdev));
++	device = cqr->startdev;
+ 
+ 	init_waitqueue_head (&wait_q);
+ 	cqr->callback = dasd_wakeup_cb;
+ 	cqr->callback_data = (void *) &wait_q;
+-	cqr->status = DASD_CQR_QUEUED;
+-	list_add_tail(&cqr->list, &device->ccw_queue);
+-
+-	/* let the bh start the request to keep them in order */
+-	dasd_schedule_bh(device);
+-
+-	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+-
++	dasd_add_request_tail(cqr);
+ 	wait_event(wait_q, _wait_for_wakeup(cqr));
+ 
+ 	/* Request status is either done or failed. */
+-	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
++	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
+ 	return rc;
+ }
+ 
+ /*
+- * Attempts to start a special ccw queue and wait interruptible
+- * for its completion.
++ * Queue a request to the tail of the device ccw_queue and wait
++ * interruptible for it's completion.
+  */
+-int
+-dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
++int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
+ {
+ 	wait_queue_head_t wait_q;
+ 	struct dasd_device *device;
+-	int rc, finished;
+-
+-	device = cqr->device;
+-	spin_lock_irq(get_ccwdev_lock(device->cdev));
++	int rc;
+ 
++	device = cqr->startdev;
+ 	init_waitqueue_head (&wait_q);
+ 	cqr->callback = dasd_wakeup_cb;
+ 	cqr->callback_data = (void *) &wait_q;
+-	cqr->status = DASD_CQR_QUEUED;
+-	list_add_tail(&cqr->list, &device->ccw_queue);
+-
+-	/* let the bh start the request to keep them in order */
+-	dasd_schedule_bh(device);
+-	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+-
+-	finished = 0;
+-	while (!finished) {
+-		rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
+-		if (rc != -ERESTARTSYS) {
+-			/* Request is final (done or failed) */
+-			rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
+-			break;
+-		}
+-		spin_lock_irq(get_ccwdev_lock(device->cdev));
+-		switch (cqr->status) {
+-		case DASD_CQR_IN_IO:
+-                        /* terminate runnig cqr */
+-			if (device->discipline->term_IO) {
+-				cqr->retries = -1;
+-				device->discipline->term_IO(cqr);
+-				/* wait (non-interruptible) for final status
+-				 * because signal ist still pending */
+-				spin_unlock_irq(get_ccwdev_lock(device->cdev));
+-				wait_event(wait_q, _wait_for_wakeup(cqr));
+-				spin_lock_irq(get_ccwdev_lock(device->cdev));
+-				rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
+-				finished = 1;
+-			}
+-			break;
+-		case DASD_CQR_QUEUED:
+-			/* request  */
+-			list_del_init(&cqr->list);
+-			rc = -EIO;
+-			finished = 1;
+-			break;
+-		default:
+-			/* cqr with 'non-interruptable' status - just wait */
+-			break;
+-		}
+-		spin_unlock_irq(get_ccwdev_lock(device->cdev));
++	dasd_add_request_tail(cqr);
++	rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
++	if (rc == -ERESTARTSYS) {
++		dasd_cancel_req(cqr);
++		/* wait (non-interruptible) for final status */
++		wait_event(wait_q, _wait_for_wakeup(cqr));
+ 	}
++	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
+ 	return rc;
+ }
+ 
+@@ -1643,25 +1454,23 @@ dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
+  * and be put back to status queued, before the special request is added
+  * to the head of the queue. Then the special request is waited on normally.
+  */
+-static inline int
+-_dasd_term_running_cqr(struct dasd_device *device)
++static inline int _dasd_term_running_cqr(struct dasd_device *device)
+ {
+ 	struct dasd_ccw_req *cqr;
+ 
+ 	if (list_empty(&device->ccw_queue))
+ 		return 0;
+-	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
++	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
+ 	return device->discipline->term_IO(cqr);
+ }
+ 
+-int
+-dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
++int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
+ {
+ 	wait_queue_head_t wait_q;
+ 	struct dasd_device *device;
+ 	int rc;
+ 
+-	device = cqr->device;
++	device = cqr->startdev;
+ 	spin_lock_irq(get_ccwdev_lock(device->cdev));
+ 	rc = _dasd_term_running_cqr(device);
+ 	if (rc) {
+@@ -1673,17 +1482,17 @@ dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
+ 	cqr->callback = dasd_wakeup_cb;
+ 	cqr->callback_data = (void *) &wait_q;
+ 	cqr->status = DASD_CQR_QUEUED;
+-	list_add(&cqr->list, &device->ccw_queue);
++	list_add(&cqr->devlist, &device->ccw_queue);
+ 
+ 	/* let the bh start the request to keep them in order */
+-	dasd_schedule_bh(device);
++	dasd_schedule_device_bh(device);
+ 
+ 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+ 
+ 	wait_event(wait_q, _wait_for_wakeup(cqr));
+ 
+ 	/* Request status is either done or failed. */
+-	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
++	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
+ 	return rc;
+ }
+ 
+@@ -1692,11 +1501,14 @@ dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
+  * This is useful to timeout requests. The request will be
+  * terminated if it is currently in i/o.
+  * Returns 1 if the request has been terminated.
++ *	   0 if there was no need to terminate the request (not started yet)
++ *	   negative error code if termination failed
++ * Cancellation of a request is an asynchronous operation! The calling
++ * function has to wait until the request is properly returned via callback.
+  */
+-int
+-dasd_cancel_req(struct dasd_ccw_req *cqr)
++int dasd_cancel_req(struct dasd_ccw_req *cqr)
+ {
+-	struct dasd_device *device = cqr->device;
++	struct dasd_device *device = cqr->startdev;
+ 	unsigned long flags;
+ 	int rc;
+ 
+@@ -1704,74 +1516,453 @@ dasd_cancel_req(struct dasd_ccw_req *cqr)
+ 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ 	switch (cqr->status) {
+ 	case DASD_CQR_QUEUED:
+-		/* request was not started - just set to failed */
+-		cqr->status = DASD_CQR_FAILED;
++		/* request was not started - just set to cleared */
++		cqr->status = DASD_CQR_CLEARED;
+ 		break;
+ 	case DASD_CQR_IN_IO:
+ 		/* request in IO - terminate IO and release again */
+-		if (device->discipline->term_IO(cqr) != 0)
+-			/* what to do if unable to terminate ??????
+-			   e.g. not _IN_IO */
+-			cqr->status = DASD_CQR_FAILED;
+-		cqr->stopclk = get_clock();
+-		rc = 1;
++		rc = device->discipline->term_IO(cqr);
++		if (rc) {
++			DEV_MESSAGE(KERN_ERR, device,
++				    "dasd_cancel_req is unable "
++				    " to terminate request %p, rc = %d",
++				    cqr, rc);
++		} else {
++			cqr->stopclk = get_clock();
++			rc = 1;
++		}
+ 		break;
+-	case DASD_CQR_DONE:
+-	case DASD_CQR_FAILED:
+-		/* already finished - do nothing */
++	default: /* already finished or clear pending - do nothing */
+ 		break;
+-	default:
+-		DEV_MESSAGE(KERN_ALERT, device,
+-			    "invalid status %02x in request",
+-			    cqr->status);
++	}
++	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
++	dasd_schedule_device_bh(device);
++	return rc;
++}
++
++
++/*
++ * SECTION: Operations of the dasd_block layer.
++ */
++
++/*
++ * Timeout function for dasd_block. This is used when the block layer
++ * is waiting for something that may not come reliably, (e.g. a state
++ * change interrupt)
++ */
++static void dasd_block_timeout(unsigned long ptr)
++{
++	unsigned long flags;
++	struct dasd_block *block;
++
++	block = (struct dasd_block *) ptr;
++	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
++	/* re-activate request queue */
++	block->base->stopped &= ~DASD_STOPPED_PENDING;
++	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
++	dasd_schedule_block_bh(block);
++}
++
++/*
++ * Setup timeout for a dasd_block in jiffies.
++ */
++void dasd_block_set_timer(struct dasd_block *block, int expires)
++{
++	if (expires == 0) {
++		if (timer_pending(&block->timer))
++			del_timer(&block->timer);
++		return;
++	}
++	if (timer_pending(&block->timer)) {
++		if (mod_timer(&block->timer, jiffies + expires))
++			return;
++	}
++	block->timer.function = dasd_block_timeout;
++	block->timer.data = (unsigned long) block;
++	block->timer.expires = jiffies + expires;
++	add_timer(&block->timer);
++}
++
++/*
++ * Clear timeout for a dasd_block.
++ */
++void dasd_block_clear_timer(struct dasd_block *block)
++{
++	if (timer_pending(&block->timer))
++		del_timer(&block->timer);
++}
++
++/*
++ * posts the buffer_cache about a finalized request
++ */
++static inline void dasd_end_request(struct request *req, int uptodate)
++{
++	if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
+ 		BUG();
++	add_disk_randomness(req->rq_disk);
++	end_that_request_last(req, uptodate);
++}
++
++/*
++ * Process finished error recovery ccw.
++ */
++static inline void __dasd_block_process_erp(struct dasd_block *block,
++					    struct dasd_ccw_req *cqr)
++{
++	dasd_erp_fn_t erp_fn;
++	struct dasd_device *device = block->base;
+ 
++	if (cqr->status == DASD_CQR_DONE)
++		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
++	else
++		DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
++	erp_fn = device->discipline->erp_postaction(cqr);
++	erp_fn(cqr);
++}
++
++/*
++ * Fetch requests from the block device queue.
++ */
++static void __dasd_process_request_queue(struct dasd_block *block)
++{
++	struct request_queue *queue;
++	struct request *req;
++	struct dasd_ccw_req *cqr;
++	struct dasd_device *basedev;
++	unsigned long flags;
++	queue = block->request_queue;
++	basedev = block->base;
++	/* No queue ? Then there is nothing to do. */
++	if (queue == NULL)
++		return;
++
++	/*
++	 * We requeue request from the block device queue to the ccw
++	 * queue only in two states. In state DASD_STATE_READY the
++	 * partition detection is done and we need to requeue requests
++	 * for that. State DASD_STATE_ONLINE is normal block device
++	 * operation.
++	 */
++	if (basedev->state < DASD_STATE_READY)
++		return;
++	/* Now we try to fetch requests from the request queue */
++	while (!blk_queue_plugged(queue) &&
++	       elv_next_request(queue)) {
++
++		req = elv_next_request(queue);
++
++		if (basedev->features & DASD_FEATURE_READONLY &&
++		    rq_data_dir(req) == WRITE) {
++			DBF_DEV_EVENT(DBF_ERR, basedev,
++				      "Rejecting write request %p",
++				      req);
++			blkdev_dequeue_request(req);
++			dasd_end_request(req, 0);
++			continue;
++		}
++		cqr = basedev->discipline->build_cp(basedev, block, req);
++		if (IS_ERR(cqr)) {
++			if (PTR_ERR(cqr) == -EBUSY)
++				break;	/* normal end condition */
++			if (PTR_ERR(cqr) == -ENOMEM)
++				break;	/* terminate request queue loop */
++			if (PTR_ERR(cqr) == -EAGAIN) {
++				/*
++				 * The current request cannot be build right
++				 * now, we have to try later. If this request
++				 * is the head-of-queue we stop the device
++				 * for 1/2 second.
++				 */
++				if (!list_empty(&block->ccw_queue))
++					break;
++				spin_lock_irqsave(get_ccwdev_lock(basedev->cdev), flags);
++				basedev->stopped |= DASD_STOPPED_PENDING;
++				spin_unlock_irqrestore(get_ccwdev_lock(basedev->cdev), flags);
++				dasd_block_set_timer(block, HZ/2);
++				break;
++			}
++			DBF_DEV_EVENT(DBF_ERR, basedev,
++				      "CCW creation failed (rc=%ld) "
++				      "on request %p",
++				      PTR_ERR(cqr), req);
++			blkdev_dequeue_request(req);
++			dasd_end_request(req, 0);
++			continue;
++		}
++		/*
++		 *  Note: callback is set to dasd_return_cqr_cb in
++		 * __dasd_block_start_head to cover erp requests as well
++		 */
++		cqr->callback_data = (void *) req;
++		cqr->status = DASD_CQR_FILLED;
++		blkdev_dequeue_request(req);
++		list_add_tail(&cqr->blocklist, &block->ccw_queue);
++		dasd_profile_start(block, cqr, req);
++	}
++}
++
++static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
++{
++	struct request *req;
++	int status;
++
++	req = (struct request *) cqr->callback_data;
++	dasd_profile_end(cqr->block, cqr, req);
++	status = cqr->memdev->discipline->free_cp(cqr, req);
++	dasd_end_request(req, status);
++}
++
++/*
++ * Process ccw request queue.
++ */
++static void __dasd_process_block_ccw_queue(struct dasd_block *block,
++					   struct list_head *final_queue)
++{
++	struct list_head *l, *n;
++	struct dasd_ccw_req *cqr;
++	dasd_erp_fn_t erp_fn;
++	unsigned long flags;
++	struct dasd_device *base = block->base;
++
++restart:
++	/* Process request with final status. */
++	list_for_each_safe(l, n, &block->ccw_queue) {
++		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
++		if (cqr->status != DASD_CQR_DONE &&
++		    cqr->status != DASD_CQR_FAILED &&
++		    cqr->status != DASD_CQR_NEED_ERP &&
++		    cqr->status != DASD_CQR_TERMINATED)
++			continue;
++
++		if (cqr->status == DASD_CQR_TERMINATED) {
++			base->discipline->handle_terminated_request(cqr);
++			goto restart;
++		}
++
++		/*  Process requests that may be recovered */
++		if (cqr->status == DASD_CQR_NEED_ERP) {
++			if (cqr->irb.esw.esw0.erw.cons &&
++			    test_bit(DASD_CQR_FLAGS_USE_ERP,
++				     &cqr->flags)) {
++				erp_fn = base->discipline->erp_action(cqr);
++				erp_fn(cqr);
++			}
++			goto restart;
++		}
++
++		/* First of all call extended error reporting. */
++		if (dasd_eer_enabled(base) &&
++		    cqr->status == DASD_CQR_FAILED) {
++			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);
++
++			/* restart request  */
++			cqr->status = DASD_CQR_FILLED;
++			cqr->retries = 255;
++			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
++			base->stopped |= DASD_STOPPED_QUIESCE;
++			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
++					       flags);
++			goto restart;
++		}
++
++		/* Process finished ERP request. */
++		if (cqr->refers) {
++			__dasd_block_process_erp(block, cqr);
++			goto restart;
++		}
++
++		/* Rechain finished requests to final queue */
++		cqr->endclk = get_clock();
++		list_move_tail(&cqr->blocklist, final_queue);
++	}
++}
++
++static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
++{
++	dasd_schedule_block_bh(cqr->block);
++}
++
++static void __dasd_block_start_head(struct dasd_block *block)
++{
++	struct dasd_ccw_req *cqr;
++
++	if (list_empty(&block->ccw_queue))
++		return;
++	/* We allways begin with the first requests on the queue, as some
++	 * of previously started requests have to be enqueued on a
++	 * dasd_device again for error recovery.
++	 */
++	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
++		if (cqr->status != DASD_CQR_FILLED)
++			continue;
++		/* Non-temporary stop condition will trigger fail fast */
++		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
++		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
++		    (!dasd_eer_enabled(block->base))) {
++			cqr->status = DASD_CQR_FAILED;
++			dasd_schedule_block_bh(block);
++			continue;
++		}
++		/* Don't try to start requests if device is stopped */
++		if (block->base->stopped)
++			return;
++
++		/* just a fail safe check, should not happen */
++		if (!cqr->startdev)
++			cqr->startdev = block->base;
++
++		/* make sure that the requests we submit find their way back */
++		cqr->callback = dasd_return_cqr_cb;
++
++		dasd_add_request_tail(cqr);
++	}
++}
++
++/*
++ * Central dasd_block layer routine. Takes requests from the generic
++ * block layer request queue, creates ccw requests, enqueues them on
++ * a dasd_device and processes ccw requests that have been returned.
++ */
++static void dasd_block_tasklet(struct dasd_block *block)
++{
++	struct list_head final_queue;
++	struct list_head *l, *n;
++	struct dasd_ccw_req *cqr;
++
++	atomic_set(&block->tasklet_scheduled, 0);
++	INIT_LIST_HEAD(&final_queue);
++	spin_lock(&block->queue_lock);
++	/* Finish off requests on ccw queue */
++	__dasd_process_block_ccw_queue(block, &final_queue);
++	spin_unlock(&block->queue_lock);
++	/* Now call the callback function of requests with final status */
++	spin_lock_irq(&block->request_queue_lock);
++	list_for_each_safe(l, n, &final_queue) {
++		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
++		list_del_init(&cqr->blocklist);
++		__dasd_cleanup_cqr(cqr);
++	}
++	spin_lock(&block->queue_lock);
++	/* Get new request from the block device request queue */
++	__dasd_process_request_queue(block);
++	/* Now check if the head of the ccw queue needs to be started. */
++	__dasd_block_start_head(block);
++	spin_unlock(&block->queue_lock);
++	spin_unlock_irq(&block->request_queue_lock);
++	dasd_put_device(block->base);
++}
++
++static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
++{
++	wake_up(&dasd_flush_wq);
++}
++
++/*
++ * Go through all request on the dasd_block request queue, cancel them
++ * on the respective dasd_device, and return them to the generic
++ * block layer.
++ */
++static int dasd_flush_block_queue(struct dasd_block *block)
++{
++	struct dasd_ccw_req *cqr, *n;
++	int rc, i;
++	struct list_head flush_queue;
++
++	INIT_LIST_HEAD(&flush_queue);
++	spin_lock_bh(&block->queue_lock);
++	rc = 0;
++restart:
++	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
++		/* if this request currently owned by a dasd_device cancel it */
++		if (cqr->status >= DASD_CQR_QUEUED)
++			rc = dasd_cancel_req(cqr);
++		if (rc < 0)
++			break;
++		/* Rechain request (including erp chain) so it won't be
++		 * touched by the dasd_block_tasklet anymore.
++		 * Replace the callback so we notice when the request
++		 * is returned from the dasd_device layer.
++		 */
++		cqr->callback = _dasd_wake_block_flush_cb;
++		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
++			list_move_tail(&cqr->blocklist, &flush_queue);
++		if (i > 1)
++			/* moved more than one request - need to restart */
++			goto restart;
++	}
++	spin_unlock_bh(&block->queue_lock);
++	/* Now call the callback function of flushed requests */
++restart_cb:
++	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
++		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
++		/* Process finished ERP request. */
++		if (cqr->refers) {
++			__dasd_block_process_erp(block, cqr);
++			/* restart list_for_xx loop since dasd_process_erp
++			 * might remove multiple elements */
++			goto restart_cb;
++		}
++		/* call the callback function */
++		cqr->endclk = get_clock();
++		list_del_init(&cqr->blocklist);
++		__dasd_cleanup_cqr(cqr);
+ 	}
+-	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+-	dasd_schedule_bh(device);
+ 	return rc;
+ }
+ 
+ /*
+- * SECTION: Block device operations (request queue, partitions, open, release).
++ * Schedules a call to dasd_tasklet over the device tasklet.
++ */
++void dasd_schedule_block_bh(struct dasd_block *block)
++{
++	/* Protect against rescheduling. */
++	if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
++		return;
++	/* life cycle of block is bound to it's base device */
++	dasd_get_device(block->base);
++	tasklet_hi_schedule(&block->tasklet);
++}
++
++
++/*
++ * SECTION: external block device operations
++ * (request queue handling, open, release, etc.)
+  */
+ 
+ /*
+  * Dasd request queue function. Called from ll_rw_blk.c
+  */
+-static void
+-do_dasd_request(struct request_queue * queue)
++static void do_dasd_request(struct request_queue *queue)
+ {
+-	struct dasd_device *device;
++	struct dasd_block *block;
+ 
+-	device = (struct dasd_device *) queue->queuedata;
+-	spin_lock(get_ccwdev_lock(device->cdev));
++	block = queue->queuedata;
++	spin_lock(&block->queue_lock);
+ 	/* Get new request from the block device request queue */
+-	__dasd_process_blk_queue(device);
++	__dasd_process_request_queue(block);
+ 	/* Now check if the head of the ccw queue needs to be started. */
+-	__dasd_start_head(device);
+-	spin_unlock(get_ccwdev_lock(device->cdev));
++	__dasd_block_start_head(block);
++	spin_unlock(&block->queue_lock);
+ }
+ 
+ /*
+  * Allocate and initialize request queue and default I/O scheduler.
+  */
+-static int
+-dasd_alloc_queue(struct dasd_device * device)
++static int dasd_alloc_queue(struct dasd_block *block)
+ {
+ 	int rc;
+ 
+-	device->request_queue = blk_init_queue(do_dasd_request,
+-					       &device->request_queue_lock);
+-	if (device->request_queue == NULL)
++	block->request_queue = blk_init_queue(do_dasd_request,
++					       &block->request_queue_lock);
++	if (block->request_queue == NULL)
+ 		return -ENOMEM;
+ 
+-	device->request_queue->queuedata = device;
++	block->request_queue->queuedata = block;
+ 
+-	elevator_exit(device->request_queue->elevator);
+-	rc = elevator_init(device->request_queue, "deadline");
++	elevator_exit(block->request_queue->elevator);
++	rc = elevator_init(block->request_queue, "deadline");
+ 	if (rc) {
+-		blk_cleanup_queue(device->request_queue);
++		blk_cleanup_queue(block->request_queue);
+ 		return rc;
+ 	}
+ 	return 0;
+@@ -1780,79 +1971,76 @@ dasd_alloc_queue(struct dasd_device * device)
+ /*
+  * Allocate and initialize request queue.
+  */
+-static void
+-dasd_setup_queue(struct dasd_device * device)
++static void dasd_setup_queue(struct dasd_block *block)
+ {
+ 	int max;
+ 
+-	blk_queue_hardsect_size(device->request_queue, device->bp_block);
+-	max = device->discipline->max_blocks << device->s2b_shift;
+-	blk_queue_max_sectors(device->request_queue, max);
+-	blk_queue_max_phys_segments(device->request_queue, -1L);
+-	blk_queue_max_hw_segments(device->request_queue, -1L);
+-	blk_queue_max_segment_size(device->request_queue, -1L);
+-	blk_queue_segment_boundary(device->request_queue, -1L);
+-	blk_queue_ordered(device->request_queue, QUEUE_ORDERED_TAG, NULL);
++	blk_queue_hardsect_size(block->request_queue, block->bp_block);
++	max = block->base->discipline->max_blocks << block->s2b_shift;
++	blk_queue_max_sectors(block->request_queue, max);
++	blk_queue_max_phys_segments(block->request_queue, -1L);
++	blk_queue_max_hw_segments(block->request_queue, -1L);
++	blk_queue_max_segment_size(block->request_queue, -1L);
++	blk_queue_segment_boundary(block->request_queue, -1L);
++	blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
+ }
+ 
+ /*
+  * Deactivate and free request queue.
+  */
+-static void
+-dasd_free_queue(struct dasd_device * device)
++static void dasd_free_queue(struct dasd_block *block)
+ {
+-	if (device->request_queue) {
+-		blk_cleanup_queue(device->request_queue);
+-		device->request_queue = NULL;
++	if (block->request_queue) {
++		blk_cleanup_queue(block->request_queue);
++		block->request_queue = NULL;
+ 	}
+ }
+ 
+ /*
+  * Flush request on the request queue.
+  */
+-static void
+-dasd_flush_request_queue(struct dasd_device * device)
++static void dasd_flush_request_queue(struct dasd_block *block)
+ {
+ 	struct request *req;
+ 
+-	if (!device->request_queue)
++	if (!block->request_queue)
+ 		return;
+ 
+-	spin_lock_irq(&device->request_queue_lock);
+-	while ((req = elv_next_request(device->request_queue))) {
++	spin_lock_irq(&block->request_queue_lock);
++	while ((req = elv_next_request(block->request_queue))) {
+ 		blkdev_dequeue_request(req);
+ 		dasd_end_request(req, 0);
+ 	}
+-	spin_unlock_irq(&device->request_queue_lock);
++	spin_unlock_irq(&block->request_queue_lock);
+ }
+ 
+-static int
+-dasd_open(struct inode *inp, struct file *filp)
++static int dasd_open(struct inode *inp, struct file *filp)
+ {
+ 	struct gendisk *disk = inp->i_bdev->bd_disk;
+-	struct dasd_device *device = disk->private_data;
++	struct dasd_block *block = disk->private_data;
++	struct dasd_device *base = block->base;
+ 	int rc;
+ 
+-        atomic_inc(&device->open_count);
+-	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
++	atomic_inc(&block->open_count);
++	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
+ 		rc = -ENODEV;
+ 		goto unlock;
+ 	}
+ 
+-	if (!try_module_get(device->discipline->owner)) {
++	if (!try_module_get(base->discipline->owner)) {
+ 		rc = -EINVAL;
+ 		goto unlock;
+ 	}
+ 
+ 	if (dasd_probeonly) {
+-		DEV_MESSAGE(KERN_INFO, device, "%s",
++		DEV_MESSAGE(KERN_INFO, base, "%s",
+ 			    "No access to device due to probeonly mode");
+ 		rc = -EPERM;
+ 		goto out;
+ 	}
+ 
+-	if (device->state <= DASD_STATE_BASIC) {
+-		DBF_DEV_EVENT(DBF_ERR, device, " %s",
++	if (base->state <= DASD_STATE_BASIC) {
++		DBF_DEV_EVENT(DBF_ERR, base, " %s",
+ 			      " Cannot open unrecognized device");
+ 		rc = -ENODEV;
+ 		goto out;
+@@ -1861,41 +2049,41 @@ dasd_open(struct inode *inp, struct file *filp)
+ 	return 0;
+ 
+ out:
+-	module_put(device->discipline->owner);
++	module_put(base->discipline->owner);
+ unlock:
+-	atomic_dec(&device->open_count);
++	atomic_dec(&block->open_count);
+ 	return rc;
+ }
+ 
+-static int
+-dasd_release(struct inode *inp, struct file *filp)
++static int dasd_release(struct inode *inp, struct file *filp)
+ {
+ 	struct gendisk *disk = inp->i_bdev->bd_disk;
+-	struct dasd_device *device = disk->private_data;
++	struct dasd_block *block = disk->private_data;
+ 
+-	atomic_dec(&device->open_count);
+-	module_put(device->discipline->owner);
++	atomic_dec(&block->open_count);
++	module_put(block->base->discipline->owner);
+ 	return 0;
+ }
+ 
+ /*
+  * Return disk geometry.
+  */
+-static int
+-dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
++static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+ {
+-	struct dasd_device *device;
++	struct dasd_block *block;
++	struct dasd_device *base;
+ 
+-	device = bdev->bd_disk->private_data;
+-	if (!device)
++	block = bdev->bd_disk->private_data;
++	base = block->base;
++	if (!block)
+ 		return -ENODEV;
+ 
+-	if (!device->discipline ||
+-	    !device->discipline->fill_geometry)
++	if (!base->discipline ||
++	    !base->discipline->fill_geometry)
+ 		return -EINVAL;
+ 
+-	device->discipline->fill_geometry(device, geo);
+-	geo->start = get_start_sect(bdev) >> device->s2b_shift;
++	base->discipline->fill_geometry(block, geo);
++	geo->start = get_start_sect(bdev) >> block->s2b_shift;
+ 	return 0;
+ }
+ 
+@@ -1909,6 +2097,9 @@ dasd_device_operations = {
+ 	.getgeo		= dasd_getgeo,
+ };
+ 
++/*******************************************************************************
++ * end of block device operations
++ */
+ 
+ static void
+ dasd_exit(void)
+@@ -1937,9 +2128,8 @@ dasd_exit(void)
+  * Initial attempt at a probe function. this can be simplified once
+  * the other detection code is gone.
+  */
+-int
+-dasd_generic_probe (struct ccw_device *cdev,
+-		    struct dasd_discipline *discipline)
++int dasd_generic_probe(struct ccw_device *cdev,
++		       struct dasd_discipline *discipline)
+ {
+ 	int ret;
+ 
+@@ -1969,19 +2159,20 @@ dasd_generic_probe (struct ccw_device *cdev,
+ 		ret = ccw_device_set_online(cdev);
+ 	if (ret)
+ 		printk(KERN_WARNING
+-		       "dasd_generic_probe: could not initially online "
+-		       "ccw-device %s\n", cdev->dev.bus_id);
+-	return ret;
++		       "dasd_generic_probe: could not initially "
++		       "online ccw-device %s; return code: %d\n",
++		       cdev->dev.bus_id, ret);
++	return 0;
+ }
+ 
+ /*
+  * This will one day be called from a global not_oper handler.
+  * It is also used by driver_unregister during module unload.
+  */
+-void
+-dasd_generic_remove (struct ccw_device *cdev)
++void dasd_generic_remove(struct ccw_device *cdev)
+ {
+ 	struct dasd_device *device;
++	struct dasd_block *block;
+ 
+ 	cdev->handler = NULL;
+ 
+@@ -2001,7 +2192,15 @@ dasd_generic_remove (struct ccw_device *cdev)
+ 	 */
+ 	dasd_set_target_state(device, DASD_STATE_NEW);
+ 	/* dasd_delete_device destroys the device reference. */
++	block = device->block;
++	device->block = NULL;
+ 	dasd_delete_device(device);
++	/*
++	 * life cycle of block is bound to device, so delete it after
++	 * device was safely removed
++	 */
++	if (block)
++		dasd_free_block(block);
+ }
+ 
+ /*
+@@ -2009,10 +2208,8 @@ dasd_generic_remove (struct ccw_device *cdev)
+  * the device is detected for the first time and is supposed to be used
+  * or the user has started activation through sysfs.
+  */
+-int
+-dasd_generic_set_online (struct ccw_device *cdev,
+-			 struct dasd_discipline *base_discipline)
+-
++int dasd_generic_set_online(struct ccw_device *cdev,
++			    struct dasd_discipline *base_discipline)
+ {
+ 	struct dasd_discipline *discipline;
+ 	struct dasd_device *device;
+@@ -2048,6 +2245,7 @@ dasd_generic_set_online (struct ccw_device *cdev,
+ 	device->base_discipline = base_discipline;
+ 	device->discipline = discipline;
+ 
++	/* check_device will allocate block device if necessary */
+ 	rc = discipline->check_device(device);
+ 	if (rc) {
+ 		printk (KERN_WARNING
+@@ -2067,6 +2265,8 @@ dasd_generic_set_online (struct ccw_device *cdev,
+ 			cdev->dev.bus_id);
+ 		rc = -ENODEV;
+ 		dasd_set_target_state(device, DASD_STATE_NEW);
++		if (device->block)
++			dasd_free_block(device->block);
+ 		dasd_delete_device(device);
+ 	} else
+ 		pr_debug("dasd_generic device %s found\n",
+@@ -2081,10 +2281,10 @@ dasd_generic_set_online (struct ccw_device *cdev,
+ 	return rc;
+ }
+ 
+-int
+-dasd_generic_set_offline (struct ccw_device *cdev)
++int dasd_generic_set_offline(struct ccw_device *cdev)
+ {
+ 	struct dasd_device *device;
++	struct dasd_block *block;
+ 	int max_count, open_count;
+ 
+ 	device = dasd_device_from_cdev(cdev);
+@@ -2101,30 +2301,39 @@ dasd_generic_set_offline (struct ccw_device *cdev)
+ 	 * the blkdev_get in dasd_scan_partitions. We are only interested
+ 	 * in the other openers.
+ 	 */
+-	max_count = device->bdev ? 0 : -1;
+-	open_count = (int) atomic_read(&device->open_count);
+-	if (open_count > max_count) {
+-		if (open_count > 0)
+-			printk (KERN_WARNING "Can't offline dasd device with "
+-				"open count = %i.\n",
+-				open_count);
+-		else
+-			printk (KERN_WARNING "%s",
+-				"Can't offline dasd device due to internal "
+-				"use\n");
+-		clear_bit(DASD_FLAG_OFFLINE, &device->flags);
+-		dasd_put_device(device);
+-		return -EBUSY;
++	if (device->block) {
++		struct dasd_block *block = device->block;
++		max_count = block->bdev ? 0 : -1;
++		open_count = (int) atomic_read(&block->open_count);
++		if (open_count > max_count) {
++			if (open_count > 0)
++				printk(KERN_WARNING "Can't offline dasd "
++				       "device with open count = %i.\n",
++				       open_count);
++			else
++				printk(KERN_WARNING "%s",
++				       "Can't offline dasd device due "
++				       "to internal use\n");
++			clear_bit(DASD_FLAG_OFFLINE, &device->flags);
++			dasd_put_device(device);
++			return -EBUSY;
++		}
+ 	}
+ 	dasd_set_target_state(device, DASD_STATE_NEW);
+ 	/* dasd_delete_device destroys the device reference. */
++	block = device->block;
++	device->block = NULL;
+ 	dasd_delete_device(device);
+-
++	/*
++	 * life cycle of block is bound to device, so delete it after
++	 * device was safely removed
++	 */
++	if (block)
++		dasd_free_block(block);
+ 	return 0;
+ }
+ 
+-int
+-dasd_generic_notify(struct ccw_device *cdev, int event)
++int dasd_generic_notify(struct ccw_device *cdev, int event)
+ {
+ 	struct dasd_device *device;
+ 	struct dasd_ccw_req *cqr;
+@@ -2145,27 +2354,22 @@ dasd_generic_notify(struct ccw_device *cdev, int event)
+ 		if (device->state < DASD_STATE_BASIC)
+ 			break;
+ 		/* Device is active. We want to keep it. */
+-		if (test_bit(DASD_FLAG_DSC_ERROR, &device->flags)) {
+-			list_for_each_entry(cqr, &device->ccw_queue, list)
+-				if (cqr->status == DASD_CQR_IN_IO)
+-					cqr->status = DASD_CQR_FAILED;
+-			device->stopped |= DASD_STOPPED_DC_EIO;
+-		} else {
+-			list_for_each_entry(cqr, &device->ccw_queue, list)
+-				if (cqr->status == DASD_CQR_IN_IO) {
+-					cqr->status = DASD_CQR_QUEUED;
+-					cqr->retries++;
+-				}
+-			device->stopped |= DASD_STOPPED_DC_WAIT;
+-			dasd_set_timer(device, 0);
+-		}
+-		dasd_schedule_bh(device);
++		list_for_each_entry(cqr, &device->ccw_queue, devlist)
++			if (cqr->status == DASD_CQR_IN_IO) {
++				cqr->status = DASD_CQR_QUEUED;
++				cqr->retries++;
++			}
++		device->stopped |= DASD_STOPPED_DC_WAIT;
++		dasd_device_clear_timer(device);
++		dasd_schedule_device_bh(device);
+ 		ret = 1;
+ 		break;
+ 	case CIO_OPER:
+ 		/* FIXME: add a sanity check. */
+-		device->stopped &= ~(DASD_STOPPED_DC_WAIT|DASD_STOPPED_DC_EIO);
+-		dasd_schedule_bh(device);
++		device->stopped &= ~DASD_STOPPED_DC_WAIT;
++		dasd_schedule_device_bh(device);
++		if (device->block)
++			dasd_schedule_block_bh(device->block);
+ 		ret = 1;
+ 		break;
+ 	}
+@@ -2195,7 +2399,8 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
+ 	ccw->cda = (__u32)(addr_t)rdc_buffer;
+ 	ccw->count = rdc_buffer_size;
+ 
+-	cqr->device = device;
++	cqr->startdev = device;
++	cqr->memdev = device;
+ 	cqr->expires = 10*HZ;
+ 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+ 	cqr->retries = 2;
+@@ -2217,13 +2422,12 @@ int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
+ 		return PTR_ERR(cqr);
+ 
+ 	ret = dasd_sleep_on(cqr);
+-	dasd_sfree_request(cqr, cqr->device);
++	dasd_sfree_request(cqr, cqr->memdev);
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
+ 
+-static int __init
+-dasd_init(void)
++static int __init dasd_init(void)
+ {
+ 	int rc;
+ 
+@@ -2231,7 +2435,7 @@ dasd_init(void)
+ 	init_waitqueue_head(&dasd_flush_wq);
+ 
+ 	/* register 'common' DASD debug area, used for all DBF_XXX calls */
+-	dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof (long));
++	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
+ 	if (dasd_debug_area == NULL) {
+ 		rc = -ENOMEM;
+ 		goto failed;
+@@ -2277,15 +2481,18 @@ EXPORT_SYMBOL(dasd_diag_discipline_pointer);
+ EXPORT_SYMBOL(dasd_add_request_head);
+ EXPORT_SYMBOL(dasd_add_request_tail);
+ EXPORT_SYMBOL(dasd_cancel_req);
+-EXPORT_SYMBOL(dasd_clear_timer);
++EXPORT_SYMBOL(dasd_device_clear_timer);
++EXPORT_SYMBOL(dasd_block_clear_timer);
+ EXPORT_SYMBOL(dasd_enable_device);
+ EXPORT_SYMBOL(dasd_int_handler);
+ EXPORT_SYMBOL(dasd_kfree_request);
+ EXPORT_SYMBOL(dasd_kick_device);
+ EXPORT_SYMBOL(dasd_kmalloc_request);
+-EXPORT_SYMBOL(dasd_schedule_bh);
++EXPORT_SYMBOL(dasd_schedule_device_bh);
++EXPORT_SYMBOL(dasd_schedule_block_bh);
+ EXPORT_SYMBOL(dasd_set_target_state);
+-EXPORT_SYMBOL(dasd_set_timer);
++EXPORT_SYMBOL(dasd_device_set_timer);
++EXPORT_SYMBOL(dasd_block_set_timer);
+ EXPORT_SYMBOL(dasd_sfree_request);
+ EXPORT_SYMBOL(dasd_sleep_on);
+ EXPORT_SYMBOL(dasd_sleep_on_immediatly);
+@@ -2299,4 +2506,7 @@ EXPORT_SYMBOL_GPL(dasd_generic_remove);
+ EXPORT_SYMBOL_GPL(dasd_generic_notify);
+ EXPORT_SYMBOL_GPL(dasd_generic_set_online);
+ EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
+-
++EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
++EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
++EXPORT_SYMBOL_GPL(dasd_alloc_block);
++EXPORT_SYMBOL_GPL(dasd_free_block);
+diff --git a/drivers/s390/block/dasd_3370_erp.c b/drivers/s390/block/dasd_3370_erp.c
+deleted file mode 100644
+index 1ddab89..0000000
+--- a/drivers/s390/block/dasd_3370_erp.c
++++ /dev/null
+@@ -1,84 +0,0 @@
+-/*
+- * File...........: linux/drivers/s390/block/dasd_3370_erp.c
+- * Author(s)......: Holger Smolinski <Holger.Smolinski at de.ibm.com>
+- * Bugreports.to..: <Linux390 at de.ibm.com>
+- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
+- *
+- */
+-
+-#define PRINTK_HEADER "dasd_erp(3370)"
+-
+-#include "dasd_int.h"
+-
+-
+-/*
+- * DASD_3370_ERP_EXAMINE
+- *
+- * DESCRIPTION
+- *   Checks only for fatal/no/recover error.
+- *   A detailed examination of the sense data is done later outside
+- *   the interrupt handler.
+- *
+- *   The logic is based on the 'IBM 3880 Storage Control Reference' manual
+- *   'Chapter 7. 3370 Sense Data'.
+- *
+- * RETURN VALUES
+- *   dasd_era_none	no error
+- *   dasd_era_fatal	for all fatal (unrecoverable errors)
+- *   dasd_era_recover	for all others.
+- */
+-dasd_era_t
+-dasd_3370_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
+-{
+-	char *sense = irb->ecw;
+-
+-	/* check for successful execution first */
+-	if (irb->scsw.cstat == 0x00 &&
+-	    irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+-		return dasd_era_none;
+-	if (sense[0] & 0x80) {	/* CMD reject */
+-		return dasd_era_fatal;
+-	}
+-	if (sense[0] & 0x40) {	/* Drive offline */
+-		return dasd_era_recover;
+-	}
+-	if (sense[0] & 0x20) {	/* Bus out parity */
+-		return dasd_era_recover;
+-	}
+-	if (sense[0] & 0x10) {	/* equipment check */
+-		if (sense[1] & 0x80) {
+-			return dasd_era_fatal;
+-		}
+-		return dasd_era_recover;
+-	}
+-	if (sense[0] & 0x08) {	/* data check */
+-		if (sense[1] & 0x80) {
+-			return dasd_era_fatal;
+-		}
+-		return dasd_era_recover;
+-	}
+-	if (sense[0] & 0x04) {	/* overrun */
+-		if (sense[1] & 0x80) {
+-			return dasd_era_fatal;
+-		}
+-		return dasd_era_recover;
+-	}
+-	if (sense[1] & 0x40) {	/* invalid blocksize */
+-		return dasd_era_fatal;
+-	}
+-	if (sense[1] & 0x04) {	/* file protected */
+-		return dasd_era_recover;
+-	}
+-	if (sense[1] & 0x01) {	/* operation incomplete */
+-		return dasd_era_recover;
+-	}
+-	if (sense[2] & 0x80) {	/* check data erroor */
+-		return dasd_era_recover;
+-	}
+-	if (sense[2] & 0x10) {	/* Env. data present */
+-		return dasd_era_recover;
+-	}
+-	/* examine the 24 byte sense data */
+-	return dasd_era_recover;
+-
+-}				/* END dasd_3370_erp_examine */
+diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
+index 5b7385e..c361ab6 100644
+--- a/drivers/s390/block/dasd_3990_erp.c
++++ b/drivers/s390/block/dasd_3990_erp.c
+@@ -26,158 +26,6 @@ struct DCTL_data {
+ 
+ /*
+  *****************************************************************************
+- * SECTION ERP EXAMINATION
+- *****************************************************************************
+- */
+-
+-/*
+- * DASD_3990_ERP_EXAMINE_24
+- *
+- * DESCRIPTION
+- *   Checks only for fatal (unrecoverable) error.
+- *   A detailed examination of the sense data is done later outside
+- *   the interrupt handler.
+- *
+- *   Each bit configuration leading to an action code 2 (Exit with
+- *   programming error or unusual condition indication)
+- *   are handled as fatal errors.
+- *
+- *   All other configurations are handled as recoverable errors.
+- *
+- * RETURN VALUES
+- *   dasd_era_fatal	for all fatal (unrecoverable errors)
+- *   dasd_era_recover	for all others.
+- */
+-static dasd_era_t
+-dasd_3990_erp_examine_24(struct dasd_ccw_req * cqr, char *sense)
+-{
+-
+-	struct dasd_device *device = cqr->device;
+-
+-	/* check for 'Command Reject' */
+-	if ((sense[0] & SNS0_CMD_REJECT) &&
+-	    (!(sense[2] & SNS2_ENV_DATA_PRESENT))) {
+-
+-		DEV_MESSAGE(KERN_ERR, device, "%s",
+-			    "EXAMINE 24: Command Reject detected - "
+-			    "fatal error");
+-
+-		return dasd_era_fatal;
+-	}
+-
+-	/* check for 'Invalid Track Format' */
+-	if ((sense[1] & SNS1_INV_TRACK_FORMAT) &&
+-	    (!(sense[2] & SNS2_ENV_DATA_PRESENT))) {
+-
+-		DEV_MESSAGE(KERN_ERR, device, "%s",
+-			    "EXAMINE 24: Invalid Track Format detected "
+-			    "- fatal error");
+-
+-		return dasd_era_fatal;
+-	}
+-
+-	/* check for 'No Record Found' */
+-	if (sense[1] & SNS1_NO_REC_FOUND) {
+-
+-                /* FIXME: fatal error ?!? */
+-		DEV_MESSAGE(KERN_ERR, device,
+-			    "EXAMINE 24: No Record Found detected %s",
+-                            device->state <= DASD_STATE_BASIC ?
+-			    " " : "- fatal error");
+-
+-		return dasd_era_fatal;
+-	}
+-
+-	/* return recoverable for all others */
+-	return dasd_era_recover;
+-}				/* END dasd_3990_erp_examine_24 */
+-
+-/*
+- * DASD_3990_ERP_EXAMINE_32
+- *
+- * DESCRIPTION
+- *   Checks only for fatal/no/recoverable error.
+- *   A detailed examination of the sense data is done later outside
+- *   the interrupt handler.
+- *
+- * RETURN VALUES
+- *   dasd_era_none	no error
+- *   dasd_era_fatal	for all fatal (unrecoverable errors)
+- *   dasd_era_recover	for recoverable others.
+- */
+-static dasd_era_t
+-dasd_3990_erp_examine_32(struct dasd_ccw_req * cqr, char *sense)
+-{
+-
+-	struct dasd_device *device = cqr->device;
+-
+-	switch (sense[25]) {
+-	case 0x00:
+-		return dasd_era_none;
+-
+-	case 0x01:
+-		DEV_MESSAGE(KERN_ERR, device, "%s", "EXAMINE 32: fatal error");
+-
+-		return dasd_era_fatal;
+-
+-	default:
+-
+-		return dasd_era_recover;
+-	}
+-
+-}				/* end dasd_3990_erp_examine_32 */
+-
+-/*
+- * DASD_3990_ERP_EXAMINE
+- *
+- * DESCRIPTION
+- *   Checks only for fatal/no/recover error.
+- *   A detailed examination of the sense data is done later outside
+- *   the interrupt handler.
+- *
+- *   The logic is based on the 'IBM 3990 Storage Control  Reference' manual
+- *   'Chapter 7. Error Recovery Procedures'.
+- *
+- * RETURN VALUES
+- *   dasd_era_none	no error
+- *   dasd_era_fatal	for all fatal (unrecoverable errors)
+- *   dasd_era_recover	for all others.
+- */
+-dasd_era_t
+-dasd_3990_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
+-{
+-
+-	char *sense = irb->ecw;
+-	dasd_era_t era = dasd_era_recover;
+-	struct dasd_device *device = cqr->device;
+-
+-	/* check for successful execution first */
+-	if (irb->scsw.cstat == 0x00 &&
+-	    irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+-		return dasd_era_none;
+-
+-	/* distinguish between 24 and 32 byte sense data */
+-	if (sense[27] & DASD_SENSE_BIT_0) {
+-
+-		era = dasd_3990_erp_examine_24(cqr, sense);
+-
+-	} else {
+-
+-		era = dasd_3990_erp_examine_32(cqr, sense);
+-
+-	}
+-
+-	/* log the erp chain if fatal error occurred */
+-	if ((era == dasd_era_fatal) && (device->state >= DASD_STATE_READY)) {
+-		dasd_log_sense(cqr, irb);
+-	}
+-
+-	return era;
+-
+-}				/* END dasd_3990_erp_examine */
+-
+-/*
+- *****************************************************************************
+  * SECTION ERP HANDLING
+  *****************************************************************************
+  */
+@@ -206,7 +54,7 @@ dasd_3990_erp_cleanup(struct dasd_ccw_req * erp, char final_status)
+ {
+ 	struct dasd_ccw_req *cqr = erp->refers;
+ 
+-	dasd_free_erp_request(erp, erp->device);
++	dasd_free_erp_request(erp, erp->memdev);
+ 	cqr->status = final_status;
+ 	return cqr;
+ 
+@@ -224,15 +72,17 @@ static void
+ dasd_3990_erp_block_queue(struct dasd_ccw_req * erp, int expires)
+ {
+ 
+-	struct dasd_device *device = erp->device;
++	struct dasd_device *device = erp->startdev;
++	unsigned long flags;
+ 
+ 	DEV_MESSAGE(KERN_INFO, device,
+ 		    "blocking request queue for %is", expires/HZ);
+ 
++	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ 	device->stopped |= DASD_STOPPED_PENDING;
+-	erp->status = DASD_CQR_QUEUED;
+-
+-	dasd_set_timer(device, expires);
++	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
++	erp->status = DASD_CQR_FILLED;
++	dasd_block_set_timer(device->block, expires);
+ }
+ 
+ /*
+@@ -251,7 +101,7 @@ static struct dasd_ccw_req *
+ dasd_3990_erp_int_req(struct dasd_ccw_req * erp)
+ {
+ 
+-	struct dasd_device *device = erp->device;
++	struct dasd_device *device = erp->startdev;
+ 
+ 	/* first time set initial retry counter and erp_function */
+ 	/* and retry once without blocking queue		 */
+@@ -292,11 +142,14 @@ dasd_3990_erp_int_req(struct dasd_ccw_req * erp)
+ static void
+ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
+ {
+-	struct dasd_device *device = erp->device;
++	struct dasd_device *device = erp->startdev;
+ 	__u8 opm;
++	unsigned long flags;
+ 
+ 	/* try alternate valid path */
++	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ 	opm = ccw_device_get_path_mask(device->cdev);
++	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+ 	//FIXME: start with get_opm ?
+ 	if (erp->lpm == 0)
+ 		erp->lpm = LPM_ANYPATH & ~(erp->irb.esw.esw0.sublog.lpum);
+@@ -309,9 +162,8 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
+ 			    "try alternate lpm=%x (lpum=%x / opm=%x)",
+ 			    erp->lpm, erp->irb.esw.esw0.sublog.lpum, opm);
+ 
+-		/* reset status to queued to handle the request again... */
+-		if (erp->status > DASD_CQR_QUEUED)
+-			erp->status = DASD_CQR_QUEUED;
++		/* reset status to submit the request again... */
++		erp->status = DASD_CQR_FILLED;
+ 		erp->retries = 1;
+ 	} else {
+ 		DEV_MESSAGE(KERN_ERR, device,
+@@ -320,8 +172,7 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
+ 			    erp->irb.esw.esw0.sublog.lpum, opm);
+ 
+ 		/* post request with permanent error */
+-		if (erp->status > DASD_CQR_QUEUED)
+-			erp->status = DASD_CQR_FAILED;
++		erp->status = DASD_CQR_FAILED;
+ 	}
+ }				/* end dasd_3990_erp_alternate_path */
+ 
+@@ -344,14 +195,14 @@ static struct dasd_ccw_req *
+ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
+ {
+ 
+-	struct dasd_device *device = erp->device;
++	struct dasd_device *device = erp->startdev;
+ 	struct DCTL_data *DCTL_data;
+ 	struct ccw1 *ccw;
+ 	struct dasd_ccw_req *dctl_cqr;
+ 
+ 	dctl_cqr = dasd_alloc_erp_request((char *) &erp->magic, 1,
+-					  sizeof (struct DCTL_data),
+-					  erp->device);
++					  sizeof(struct DCTL_data),
++					  device);
+ 	if (IS_ERR(dctl_cqr)) {
+ 		DEV_MESSAGE(KERN_ERR, device, "%s",
+ 			    "Unable to allocate DCTL-CQR");
+@@ -365,13 +216,14 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
+ 	DCTL_data->modifier = modifier;
+ 
+ 	ccw = dctl_cqr->cpaddr;
+-	memset(ccw, 0, sizeof (struct ccw1));
++	memset(ccw, 0, sizeof(struct ccw1));
+ 	ccw->cmd_code = CCW_CMD_DCTL;
+ 	ccw->count = 4;
+ 	ccw->cda = (__u32)(addr_t) DCTL_data;
+ 	dctl_cqr->function = dasd_3990_erp_DCTL;
+ 	dctl_cqr->refers = erp;
+-	dctl_cqr->device = erp->device;
++	dctl_cqr->startdev = device;
++	dctl_cqr->memdev = device;
+ 	dctl_cqr->magic = erp->magic;
+ 	dctl_cqr->expires = 5 * 60 * HZ;
+ 	dctl_cqr->retries = 2;
+@@ -435,7 +287,7 @@ static struct dasd_ccw_req *
+ dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
+ {
+ 
+-	struct dasd_device *device = erp->device;
++	struct dasd_device *device = erp->startdev;
+ 
+ 	/* first time set initial retry counter and erp_function    */
+ 	/* and retry once without waiting for state change pending  */
+@@ -472,7 +324,7 @@ dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
+ 				     "redriving request immediately, "
+ 				     "%d retries left",
+ 				     erp->retries);
+-			erp->status = DASD_CQR_QUEUED;
++			erp->status = DASD_CQR_FILLED;
+ 		}
+ 	}
+ 
+@@ -530,7 +382,7 @@ static void
+ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
+ {
+ 
+-	struct dasd_device *device = erp->device;
++	struct dasd_device *device = erp->startdev;
+ 	char msg_format = (sense[7] & 0xF0);
+ 	char msg_no = (sense[7] & 0x0F);
+ 
+@@ -1157,7 +1009,7 @@ static struct dasd_ccw_req *
+ dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense)
+ {
+ 
+-	struct dasd_device *device = erp->device;
++	struct dasd_device *device = erp->startdev;
+ 
+ 	erp->function = dasd_3990_erp_com_rej;
+ 
+@@ -1198,7 +1050,7 @@ static struct dasd_ccw_req *
+ dasd_3990_erp_bus_out(struct dasd_ccw_req * erp)
+ {
+ 
+-	struct dasd_device *device = erp->device;
++	struct dasd_device *device = erp->startdev;
+ 
+ 	/* first time set initial retry counter and erp_function */
+ 	/* and retry once without blocking queue		 */
+@@ -1237,7 +1089,7 @@ static struct dasd_ccw_req *
+ dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense)
+ {
+ 
+-	struct dasd_device *device = erp->device;
++	struct dasd_device *device = erp->startdev;
+ 
+ 	erp->function = dasd_3990_erp_equip_check;
+ 
+@@ -1279,7 +1131,6 @@ dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense)
+ 
+ 		erp = dasd_3990_erp_action_5(erp);
+ 	}
+-
+ 	return erp;
+ 
+ }				/* end dasd_3990_erp_equip_check */
+@@ -1299,7 +1150,7 @@ static struct dasd_ccw_req *
+ dasd_3990_erp_data_check(struct dasd_ccw_req * erp, char *sense)
+ {
+ 
+-	struct dasd_device *device = erp->device;
++	struct dasd_device *device = erp->startdev;
+ 
+ 	erp->function = dasd_3990_erp_data_check;
+ 
+@@ -1358,7 +1209,7 @@ static struct dasd_ccw_req *
+ dasd_3990_erp_overrun(struct dasd_ccw_req * erp, char *sense)
+ {
+ 
+-	struct dasd_device *device = erp->device;
++	struct dasd_device *device = erp->startdev;
+ 
+ 	erp->function = dasd_3990_erp_overrun;
+ 
+@@ -1387,7 +1238,7 @@ static struct dasd_ccw_req *
+ dasd_3990_erp_inv_format(struct dasd_ccw_req * erp, char *sense)
+ {
+ 
+-	struct dasd_device *device = erp->device;
++	struct dasd_device *device = erp->startdev;
+ 
+ 	erp->function = dasd_3990_erp_inv_format;
+ 
+@@ -1403,8 +1254,7 @@ dasd_3990_erp_inv_format(struct dasd_ccw_req * erp, char *sense)
+ 
+ 	} else {
+ 		DEV_MESSAGE(KERN_ERR, device, "%s",
+-			    "Invalid Track Format - Fatal error should have "
+-			    "been handled within the interrupt handler");
++			    "Invalid Track Format - Fatal error");
+ 
+ 		erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+ 	}
+@@ -1428,7 +1278,7 @@ static struct dasd_ccw_req *
+ dasd_3990_erp_EOC(struct dasd_ccw_req * default_erp, char *sense)
+ {
+ 
+-	struct dasd_device *device = default_erp->device;
++	struct dasd_device *device = default_erp->startdev;
+ 
+ 	DEV_MESSAGE(KERN_ERR, device, "%s",
+ 		    "End-of-Cylinder - must never happen");
+@@ -1453,7 +1303,7 @@ static struct dasd_ccw_req *
+ dasd_3990_erp_env_data(struct dasd_ccw_req * erp, char *sense)
+ {
+ 
+-	struct dasd_device *device = erp->device;
++	struct dasd_device *device = erp->startdev;
+ 
+ 	erp->function = dasd_3990_erp_env_data;
+ 
+@@ -1463,11 +1313,9 @@ dasd_3990_erp_env_data(struct dasd_ccw_req * erp, char *sense)
+ 
+ 	/* don't retry on disabled interface */
+ 	if (sense[7] != 0x0F) {
+-
+ 		erp = dasd_3990_erp_action_4(erp, sense);
+ 	} else {
+-
+-		erp = dasd_3990_erp_cleanup(erp, DASD_CQR_IN_IO);
++		erp->status = DASD_CQR_FILLED;
+ 	}
+ 
+ 	return erp;
+@@ -1490,11 +1338,10 @@ static struct dasd_ccw_req *
+ dasd_3990_erp_no_rec(struct dasd_ccw_req * default_erp, char *sense)
+ {
+ 
+-	struct dasd_device *device = default_erp->device;
++	struct dasd_device *device = default_erp->startdev;
+ 
+ 	DEV_MESSAGE(KERN_ERR, device, "%s",
+-		    "No Record Found - Fatal error should "
+-		    "have been handled within the interrupt handler");
++		    "No Record Found - Fatal error ");
+ 
+ 	return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
+ 
+@@ -1517,7 +1364,7 @@ static struct dasd_ccw_req *
+ dasd_3990_erp_file_prot(struct dasd_ccw_req * erp)
+ {
+ 
+-	struct dasd_device *device = erp->device;
++	struct dasd_device *device = erp->startdev;
+ 
+ 	DEV_MESSAGE(KERN_ERR, device, "%s", "File Protected");
+ 
+@@ -1526,6 +1373,43 @@ dasd_3990_erp_file_prot(struct dasd_ccw_req * erp)
+ }				/* end dasd_3990_erp_file_prot */
+ 
+ /*
++ * DASD_3990_ERP_INSPECT_ALIAS
++ *
++ * DESCRIPTION
++ *   Checks if the original request was started on an alias device.
++ *   If yes, it modifies the original and the erp request so that
++ *   the erp request can be started on a base device.
++ *
++ * PARAMETER
++ *   erp		pointer to the currently created default ERP
++ *
++ * RETURN VALUES
++ *   erp		pointer to the modified ERP, or NULL
++ */
++
++static struct dasd_ccw_req *dasd_3990_erp_inspect_alias(
++						struct dasd_ccw_req *erp)
++{
++	struct dasd_ccw_req *cqr = erp->refers;
++
++	if (cqr->block &&
++	    (cqr->block->base != cqr->startdev)) {
++		if (cqr->startdev->features & DASD_FEATURE_ERPLOG) {
++			DEV_MESSAGE(KERN_ERR, cqr->startdev,
++				    "ERP on alias device for request %p,"
++				    " recover on base device %s", cqr,
++				    cqr->block->base->cdev->dev.bus_id);
++		}
++		dasd_eckd_reset_ccw_to_base_io(cqr);
++		erp->startdev = cqr->block->base;
++		erp->function = dasd_3990_erp_inspect_alias;
++		return erp;
++	} else
++		return NULL;
++}
++
++
++/*
+  * DASD_3990_ERP_INSPECT_24
+  *
+  * DESCRIPTION
+@@ -1623,7 +1507,7 @@ static struct dasd_ccw_req *
+ dasd_3990_erp_action_10_32(struct dasd_ccw_req * erp, char *sense)
+ {
+ 
+-	struct dasd_device *device = erp->device;
++	struct dasd_device *device = erp->startdev;
+ 
+ 	erp->retries = 256;
+ 	erp->function = dasd_3990_erp_action_10_32;
+@@ -1657,13 +1541,14 @@ static struct dasd_ccw_req *
+ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
+ {
+ 
+-	struct dasd_device *device = default_erp->device;
++	struct dasd_device *device = default_erp->startdev;
+ 	__u32 cpa = 0;
+ 	struct dasd_ccw_req *cqr;
+ 	struct dasd_ccw_req *erp;
+ 	struct DE_eckd_data *DE_data;
++	struct PFX_eckd_data *PFX_data;
+ 	char *LO_data;		/* LO_eckd_data_t */
+-	struct ccw1 *ccw;
++	struct ccw1 *ccw, *oldccw;
+ 
+ 	DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ 		    "Write not finished because of unexpected condition");
+@@ -1702,8 +1587,8 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
+ 	/* Build new ERP request including DE/LO */
+ 	erp = dasd_alloc_erp_request((char *) &cqr->magic,
+ 				     2 + 1,/* DE/LO + TIC */
+-				     sizeof (struct DE_eckd_data) +
+-				     sizeof (struct LO_eckd_data), device);
++				     sizeof(struct DE_eckd_data) +
++				     sizeof(struct LO_eckd_data), device);
+ 
+ 	if (IS_ERR(erp)) {
+ 		DEV_MESSAGE(KERN_ERR, device, "%s", "Unable to allocate ERP");
+@@ -1712,10 +1597,16 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
+ 
+ 	/* use original DE */
+ 	DE_data = erp->data;
+-	memcpy(DE_data, cqr->data, sizeof (struct DE_eckd_data));
++	oldccw = cqr->cpaddr;
++	if (oldccw->cmd_code == DASD_ECKD_CCW_PFX) {
++		PFX_data = cqr->data;
++		memcpy(DE_data, &PFX_data->define_extend,
++		       sizeof(struct DE_eckd_data));
++	} else
++		memcpy(DE_data, cqr->data, sizeof(struct DE_eckd_data));
+ 
+ 	/* create LO */
+-	LO_data = erp->data + sizeof (struct DE_eckd_data);
++	LO_data = erp->data + sizeof(struct DE_eckd_data);
+ 
+ 	if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) {
+ 
+@@ -1748,7 +1639,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
+ 
+ 	/* create DE ccw */
+ 	ccw = erp->cpaddr;
+-	memset(ccw, 0, sizeof (struct ccw1));
++	memset(ccw, 0, sizeof(struct ccw1));
+ 	ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
+ 	ccw->flags = CCW_FLAG_CC;
+ 	ccw->count = 16;
+@@ -1756,7 +1647,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
+ 
+ 	/* create LO ccw */
+ 	ccw++;
+-	memset(ccw, 0, sizeof (struct ccw1));
++	memset(ccw, 0, sizeof(struct ccw1));
+ 	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
+ 	ccw->flags = CCW_FLAG_CC;
+ 	ccw->count = 16;
+@@ -1770,7 +1661,8 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
+ 	/* fill erp related fields */
+ 	erp->function = dasd_3990_erp_action_1B_32;
+ 	erp->refers = default_erp->refers;
+-	erp->device = device;
++	erp->startdev = device;
++	erp->memdev = device;
+ 	erp->magic = default_erp->magic;
+ 	erp->expires = 0;
+ 	erp->retries = 256;
+@@ -1803,7 +1695,7 @@ static struct dasd_ccw_req *
+ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
+ {
+ 
+-	struct dasd_device *device = previous_erp->device;
++	struct dasd_device *device = previous_erp->startdev;
+ 	__u32 cpa = 0;
+ 	struct dasd_ccw_req *cqr;
+ 	struct dasd_ccw_req *erp;
+@@ -1827,7 +1719,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
+ 		DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ 			    "Imprecise ending is set - just retry");
+ 
+-		previous_erp->status = DASD_CQR_QUEUED;
++		previous_erp->status = DASD_CQR_FILLED;
+ 
+ 		return previous_erp;
+ 	}
+@@ -1850,7 +1742,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
+ 	erp = previous_erp;
+ 
+ 	/* update the LO with the new returned sense data  */
+-	LO_data = erp->data + sizeof (struct DE_eckd_data);
++	LO_data = erp->data + sizeof(struct DE_eckd_data);
+ 
+ 	if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) {
+ 
+@@ -1889,7 +1781,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
+ 	ccw++;			/* addr of TIC ccw */
+ 	ccw->cda = cpa;
+ 
+-	erp->status = DASD_CQR_QUEUED;
++	erp->status = DASD_CQR_FILLED;
+ 
+ 	return erp;
+ 
+@@ -1968,9 +1860,7 @@ dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
+ 			 * try further actions. */
+ 
+ 			erp->lpm = 0;
+-
+-			erp->status = DASD_CQR_ERROR;
+-
++			erp->status = DASD_CQR_NEED_ERP;
+ 		}
+ 	}
+ 
+@@ -2047,7 +1937,7 @@ dasd_3990_erp_compound_config(struct dasd_ccw_req * erp, char *sense)
+ 	if ((sense[25] & DASD_SENSE_BIT_1) && (sense[26] & DASD_SENSE_BIT_2)) {
+ 
+ 		/* set to suspended duplex state then restart */
+-		struct dasd_device *device = erp->device;
++		struct dasd_device *device = erp->startdev;
+ 
+ 		DEV_MESSAGE(KERN_ERR, device, "%s",
+ 			    "Set device to suspended duplex state should be "
+@@ -2081,28 +1971,26 @@ dasd_3990_erp_compound(struct dasd_ccw_req * erp, char *sense)
+ {
+ 
+ 	if ((erp->function == dasd_3990_erp_compound_retry) &&
+-	    (erp->status == DASD_CQR_ERROR)) {
++	    (erp->status == DASD_CQR_NEED_ERP)) {
+ 
+ 		dasd_3990_erp_compound_path(erp, sense);
+ 	}
+ 
+ 	if ((erp->function == dasd_3990_erp_compound_path) &&
+-	    (erp->status == DASD_CQR_ERROR)) {
++	    (erp->status == DASD_CQR_NEED_ERP)) {
+ 
+ 		erp = dasd_3990_erp_compound_code(erp, sense);
+ 	}
+ 
+ 	if ((erp->function == dasd_3990_erp_compound_code) &&
+-	    (erp->status == DASD_CQR_ERROR)) {
++	    (erp->status == DASD_CQR_NEED_ERP)) {
+ 
+ 		dasd_3990_erp_compound_config(erp, sense);
+ 	}
+ 
+ 	/* if no compound action ERP specified, the request failed */
+-	if (erp->status == DASD_CQR_ERROR) {
+-
++	if (erp->status == DASD_CQR_NEED_ERP)
+ 		erp->status = DASD_CQR_FAILED;
+-	}
+ 
+ 	return erp;
+ 
+@@ -2127,7 +2015,7 @@ static struct dasd_ccw_req *
+ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
+ {
+ 
+-	struct dasd_device *device = erp->device;
++	struct dasd_device *device = erp->startdev;
+ 
+ 	erp->function = dasd_3990_erp_inspect_32;
+ 
+@@ -2149,8 +2037,7 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
+ 
+ 		case 0x01:	/* fatal error */
+ 			DEV_MESSAGE(KERN_ERR, device, "%s",
+-				    "Fatal error should have been "
+-				    "handled within the interrupt handler");
++				    "Retry not recommended - Fatal error");
+ 
+ 			erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+ 			break;
+@@ -2253,6 +2140,11 @@ dasd_3990_erp_inspect(struct dasd_ccw_req * erp)
+ 	/* already set up new ERP !			      */
+ 	char *sense = erp->refers->irb.ecw;
+ 
++	/* if this problem occurred on an alias retry on base */
++	erp_new = dasd_3990_erp_inspect_alias(erp);
++	if (erp_new)
++		return erp_new;
++
+ 	/* distinguish between 24 and 32 byte sense data */
+ 	if (sense[27] & DASD_SENSE_BIT_0) {
+ 
+@@ -2287,13 +2179,13 @@ static struct dasd_ccw_req *
+ dasd_3990_erp_add_erp(struct dasd_ccw_req * cqr)
+ {
+ 
+-	struct dasd_device *device = cqr->device;
++	struct dasd_device *device = cqr->startdev;
+ 	struct ccw1 *ccw;
+ 
+ 	/* allocate additional request block */
+ 	struct dasd_ccw_req *erp;
+ 
+-	erp = dasd_alloc_erp_request((char *) &cqr->magic, 2, 0, cqr->device);
++	erp = dasd_alloc_erp_request((char *) &cqr->magic, 2, 0, device);
+ 	if (IS_ERR(erp)) {
+                 if (cqr->retries <= 0) {
+ 		        DEV_MESSAGE(KERN_ERR, device, "%s",
+@@ -2305,7 +2197,7 @@ dasd_3990_erp_add_erp(struct dasd_ccw_req * cqr)
+                                      "Unable to allocate ERP request "
+ 				     "(%i retries left)",
+                                      cqr->retries);
+-			dasd_set_timer(device, (HZ << 3));
++			dasd_block_set_timer(device->block, (HZ << 3));
+                 }
+ 		return cqr;
+ 	}
+@@ -2319,7 +2211,9 @@ dasd_3990_erp_add_erp(struct dasd_ccw_req * cqr)
+ 	ccw->cda      = (long)(cqr->cpaddr);
+ 	erp->function = dasd_3990_erp_add_erp;
+ 	erp->refers   = cqr;
+-	erp->device   = cqr->device;
++	erp->startdev = device;
++	erp->memdev   = device;
++	erp->block    = cqr->block;
+ 	erp->magic    = cqr->magic;
+ 	erp->expires  = 0;
+ 	erp->retries  = 256;
+@@ -2466,7 +2360,7 @@ static struct dasd_ccw_req *
+ dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
+ {
+ 
+-	struct dasd_device *device = erp->device;
++	struct dasd_device *device = erp->startdev;
+ 	char *sense = erp->irb.ecw;
+ 
+ 	/* check for 24 byte sense ERP */
+@@ -2557,7 +2451,7 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
+ 			       struct dasd_ccw_req *erp)
+ {
+ 
+-	struct dasd_device *device = erp_head->device;
++	struct dasd_device *device = erp_head->startdev;
+ 	struct dasd_ccw_req *erp_done = erp_head;	/* finished req */
+ 	struct dasd_ccw_req *erp_free = NULL;	/* req to be freed */
+ 
+@@ -2569,13 +2463,13 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
+ 			      "original request was lost\n");
+ 
+ 		/* remove the request from the device queue */
+-		list_del(&erp_done->list);
++		list_del(&erp_done->blocklist);
+ 
+ 		erp_free = erp_done;
+ 		erp_done = erp_done->refers;
+ 
+ 		/* free the finished erp request */
+-		dasd_free_erp_request(erp_free, erp_free->device);
++		dasd_free_erp_request(erp_free, erp_free->memdev);
+ 
+ 	}			/* end while */
+ 
+@@ -2603,7 +2497,7 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
+ 				    erp->retries, erp);
+ 
+ 			/* handle the request again... */
+-			erp->status = DASD_CQR_QUEUED;
++			erp->status = DASD_CQR_FILLED;
+ 		}
+ 
+ 	} else {
+@@ -2620,7 +2514,7 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
+  * DASD_3990_ERP_ACTION
+  *
+  * DESCRIPTION
+- *   controll routine for 3990 erp actions.
++ *   control routine for 3990 erp actions.
+  *   Has to be called with the queue lock (namely the s390_irq_lock) acquired.
+  *
+  * PARAMETER
+@@ -2636,9 +2530,8 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
+ struct dasd_ccw_req *
+ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
+ {
+-
+ 	struct dasd_ccw_req *erp = NULL;
+-	struct dasd_device *device = cqr->device;
++	struct dasd_device *device = cqr->startdev;
+ 	struct dasd_ccw_req *temp_erp = NULL;
+ 
+ 	if (device->features & DASD_FEATURE_ERPLOG) {
+@@ -2704,10 +2597,11 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
+ 		}
+ 	}
+ 
+-	/* enqueue added ERP request */
+-	if (erp->status == DASD_CQR_FILLED) {
+-		erp->status = DASD_CQR_QUEUED;
+-		list_add(&erp->list, &device->ccw_queue);
++	/* enqueue ERP request if it's a new one */
++	if (list_empty(&erp->blocklist)) {
++		cqr->status = DASD_CQR_IN_ERP;
++		/* add erp request before the cqr */
++		list_add_tail(&erp->blocklist, &cqr->blocklist);
+ 	}
+ 
+ 	return erp;
+diff --git a/drivers/s390/block/dasd_9336_erp.c b/drivers/s390/block/dasd_9336_erp.c
+deleted file mode 100644
+index 6e08268..0000000
+--- a/drivers/s390/block/dasd_9336_erp.c
++++ /dev/null
+@@ -1,41 +0,0 @@
+-/*
+- * File...........: linux/drivers/s390/block/dasd_9336_erp.c
+- * Author(s)......: Holger Smolinski <Holger.Smolinski at de.ibm.com>
+- * Bugreports.to..: <Linux390 at de.ibm.com>
+- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
+- *
+- */
+-
+-#define PRINTK_HEADER "dasd_erp(9336)"
+-
+-#include "dasd_int.h"
+-
+-
+-/*
+- * DASD_9336_ERP_EXAMINE
+- *
+- * DESCRIPTION
+- *   Checks only for fatal/no/recover error.
+- *   A detailed examination of the sense data is done later outside
+- *   the interrupt handler.
+- *
+- *   The logic is based on the 'IBM 3880 Storage Control Reference' manual
+- *   'Chapter 7. 9336 Sense Data'.
+- *
+- * RETURN VALUES
+- *   dasd_era_none	no error
+- *   dasd_era_fatal	for all fatal (unrecoverable errors)
+- *   dasd_era_recover	for all others.
+- */
+-dasd_era_t
+-dasd_9336_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
+-{
+-	/* check for successful execution first */
+-	if (irb->scsw.cstat == 0x00 &&
+-	    irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+-		return dasd_era_none;
+-
+-	/* examine the 24 byte sense data */
+-	return dasd_era_recover;
+-
+-}				/* END dasd_9336_erp_examine */
+diff --git a/drivers/s390/block/dasd_9343_erp.c b/drivers/s390/block/dasd_9343_erp.c
+deleted file mode 100644
+index ddecb98..0000000
+--- a/drivers/s390/block/dasd_9343_erp.c
++++ /dev/null
+@@ -1,21 +0,0 @@
+-/*
+- * File...........: linux/drivers/s390/block/dasd_9345_erp.c
+- * Author(s)......: Holger Smolinski <Holger.Smolinski at de.ibm.com>
+- * Bugreports.to..: <Linux390 at de.ibm.com>
+- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
+- *
+- */
+-
+-#define PRINTK_HEADER "dasd_erp(9343)"
+-
+-#include "dasd_int.h"
+-
+-dasd_era_t
+-dasd_9343_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
+-{
+-	if (irb->scsw.cstat == 0x00 &&
+-	    irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+-		return dasd_era_none;
+-
+-	return dasd_era_recover;
+-}
+diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
+new file mode 100644
+index 0000000..3a40bee
+--- /dev/null
++++ b/drivers/s390/block/dasd_alias.c
+@@ -0,0 +1,903 @@
++/*
++ * PAV alias management for the DASD ECKD discipline
++ *
++ * Copyright IBM Corporation, 2007
++ * Author(s): Stefan Weinhuber <wein at de.ibm.com>
++ */
++
++#include <linux/list.h>
++#include <asm/ebcdic.h>
++#include "dasd_int.h"
++#include "dasd_eckd.h"
++
++#ifdef PRINTK_HEADER
++#undef PRINTK_HEADER
++#endif				/* PRINTK_HEADER */
++#define PRINTK_HEADER "dasd(eckd):"
++
++
++/*
++ * General concept of alias management:
++ * - PAV and DASD alias management is specific to the eckd discipline.
++ * - A device is connected to an lcu as long as the device exists.
++ *   dasd_alias_make_device_known_to_lcu will be called when the
++ *   device is checked by the eckd discipline and
++ *   dasd_alias_disconnect_device_from_lcu will be called
++ *   before the device is deleted.
++ * - The dasd_alias_add_device / dasd_alias_remove_device
++ *   functions mark the point when a device is 'ready for service'.
++ * - A summary unit check is a rare occasion, but it is mandatory to
++ *   support it. It requires some complex recovery actions before the
++ *   devices can be used again (see dasd_alias_handle_summary_unit_check).
++ * - dasd_alias_get_start_dev will find an alias device that can be used
++ *   instead of the base device and does some (very simple) load balancing.
++ *   This is the function that gets called for each I/O, so when improving
++ *   something, this function should get faster or better, the rest has just
++ *   to be correct.
++ */
++
++
++static void summary_unit_check_handling_work(struct work_struct *);
++static void lcu_update_work(struct work_struct *);
++static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);
++
++static struct alias_root aliastree = {
++	.serverlist = LIST_HEAD_INIT(aliastree.serverlist),
++	.lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
++};
++
++static struct alias_server *_find_server(struct dasd_uid *uid)
++{
++	struct alias_server *pos;
++	list_for_each_entry(pos, &aliastree.serverlist, server) {
++		if (!strncmp(pos->uid.vendor, uid->vendor,
++			     sizeof(uid->vendor))
++		    && !strncmp(pos->uid.serial, uid->serial,
++				sizeof(uid->serial)))
++			return pos;
++	};
++	return NULL;
++}
++
++static struct alias_lcu *_find_lcu(struct alias_server *server,
++				   struct dasd_uid *uid)
++{
++	struct alias_lcu *pos;
++	list_for_each_entry(pos, &server->lculist, lcu) {
++		if (pos->uid.ssid == uid->ssid)
++			return pos;
++	};
++	return NULL;
++}
++
++static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
++					   struct dasd_uid *uid)
++{
++	struct alias_pav_group *pos;
++	__u8 search_unit_addr;
++
++	/* for hyper pav there is only one group */
++	if (lcu->pav == HYPER_PAV) {
++		if (list_empty(&lcu->grouplist))
++			return NULL;
++		else
++			return list_first_entry(&lcu->grouplist,
++						struct alias_pav_group, group);
++	}
++
++	/* for base pav we have to find the group that matches the base */
++	if (uid->type == UA_BASE_DEVICE)
++		search_unit_addr = uid->real_unit_addr;
++	else
++		search_unit_addr = uid->base_unit_addr;
++	list_for_each_entry(pos, &lcu->grouplist, group) {
++		if (pos->uid.base_unit_addr == search_unit_addr)
++			return pos;
++	};
++	return NULL;
++}
++
++static struct alias_server *_allocate_server(struct dasd_uid *uid)
++{
++	struct alias_server *server;
++
++	server = kzalloc(sizeof(*server), GFP_KERNEL);
++	if (!server)
++		return ERR_PTR(-ENOMEM);
++	memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
++	memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
++	INIT_LIST_HEAD(&server->server);
++	INIT_LIST_HEAD(&server->lculist);
++	return server;
++}
++
++static void _free_server(struct alias_server *server)
++{
++	kfree(server);
++}
++
++static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
++{
++	struct alias_lcu *lcu;
++
++	lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
++	if (!lcu)
++		return ERR_PTR(-ENOMEM);
++	lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
++	if (!lcu->uac)
++		goto out_err1;
++	lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
++	if (!lcu->rsu_cqr)
++		goto out_err2;
++	lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
++				       GFP_KERNEL | GFP_DMA);
++	if (!lcu->rsu_cqr->cpaddr)
++		goto out_err3;
++	lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
++	if (!lcu->rsu_cqr->data)
++		goto out_err4;
++
++	memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
++	memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
++	lcu->uid.ssid = uid->ssid;
++	lcu->pav = NO_PAV;
++	lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
++	INIT_LIST_HEAD(&lcu->lcu);
++	INIT_LIST_HEAD(&lcu->inactive_devices);
++	INIT_LIST_HEAD(&lcu->active_devices);
++	INIT_LIST_HEAD(&lcu->grouplist);
++	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
++	INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
++	spin_lock_init(&lcu->lock);
++	return lcu;
++
++out_err4:
++	kfree(lcu->rsu_cqr->cpaddr);
++out_err3:
++	kfree(lcu->rsu_cqr);
++out_err2:
++	kfree(lcu->uac);
++out_err1:
++	kfree(lcu);
++	return ERR_PTR(-ENOMEM);
++}
++
++static void _free_lcu(struct alias_lcu *lcu)
++{
++	kfree(lcu->rsu_cqr->data);
++	kfree(lcu->rsu_cqr->cpaddr);
++	kfree(lcu->rsu_cqr);
++	kfree(lcu->uac);
++	kfree(lcu);
++}
++
++/*
++ * This is the function that will allocate all the server and lcu data,
++ * so this function must be called first for a new device.
++ * If the return value is 1, the lcu was already known before, if it
++ * is 0, this is a new lcu.
++ * Negative return code indicates that something went wrong (e.g. -ENOMEM)
++ */
++int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
++{
++	struct dasd_eckd_private *private;
++	unsigned long flags;
++	struct alias_server *server, *newserver;
++	struct alias_lcu *lcu, *newlcu;
++	int is_lcu_known;
++	struct dasd_uid *uid;
++
++	private = (struct dasd_eckd_private *) device->private;
++	uid = &private->uid;
++	spin_lock_irqsave(&aliastree.lock, flags);
++	is_lcu_known = 1;
++	server = _find_server(uid);
++	if (!server) {
++		spin_unlock_irqrestore(&aliastree.lock, flags);
++		newserver = _allocate_server(uid);
++		if (IS_ERR(newserver))
++			return PTR_ERR(newserver);
++		spin_lock_irqsave(&aliastree.lock, flags);
++		server = _find_server(uid);
++		if (!server) {
++			list_add(&newserver->server, &aliastree.serverlist);
++			server = newserver;
++			is_lcu_known = 0;
++		} else {
++			/* someone was faster */
++			_free_server(newserver);
++		}
++	}
++
++	lcu = _find_lcu(server, uid);
++	if (!lcu) {
++		spin_unlock_irqrestore(&aliastree.lock, flags);
++		newlcu = _allocate_lcu(uid);
++		if (IS_ERR(newlcu))
++			return PTR_ERR(newlcu);
++		spin_lock_irqsave(&aliastree.lock, flags);
++		lcu = _find_lcu(server, uid);
++		if (!lcu) {
++			list_add(&newlcu->lcu, &server->lculist);
++			lcu = newlcu;
++			is_lcu_known = 0;
++		} else {
++			/* someone was faster */
++			_free_lcu(newlcu);
++		}
++		is_lcu_known = 0;
++	}
++	spin_lock(&lcu->lock);
++	list_add(&device->alias_list, &lcu->inactive_devices);
++	private->lcu = lcu;
++	spin_unlock(&lcu->lock);
++	spin_unlock_irqrestore(&aliastree.lock, flags);
++
++	return is_lcu_known;
++}
++
++/*
++ * This function removes a device from the scope of alias management.
++ * The complicated part is to make sure that it is not in use by
++ * any of the workers. If necessary cancel the work.
++ */
++void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
++{
++	struct dasd_eckd_private *private;
++	unsigned long flags;
++	struct alias_lcu *lcu;
++	struct alias_server *server;
++	int was_pending;
++
++	private = (struct dasd_eckd_private *) device->private;
++	lcu = private->lcu;
++	spin_lock_irqsave(&lcu->lock, flags);
++	list_del_init(&device->alias_list);
++	/* make sure that the workers don't use this device */
++	if (device == lcu->suc_data.device) {
++		spin_unlock_irqrestore(&lcu->lock, flags);
++		cancel_work_sync(&lcu->suc_data.worker);
++		spin_lock_irqsave(&lcu->lock, flags);
++		if (device == lcu->suc_data.device)
++			lcu->suc_data.device = NULL;
++	}
++	was_pending = 0;
++	if (device == lcu->ruac_data.device) {
++		spin_unlock_irqrestore(&lcu->lock, flags);
++		was_pending = 1;
++		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
++		spin_lock_irqsave(&lcu->lock, flags);
++		if (device == lcu->ruac_data.device)
++			lcu->ruac_data.device = NULL;
++	}
++	private->lcu = NULL;
++	spin_unlock_irqrestore(&lcu->lock, flags);
++
++	spin_lock_irqsave(&aliastree.lock, flags);
++	spin_lock(&lcu->lock);
++	if (list_empty(&lcu->grouplist) &&
++	    list_empty(&lcu->active_devices) &&
++	    list_empty(&lcu->inactive_devices)) {
++		list_del(&lcu->lcu);
++		spin_unlock(&lcu->lock);
++		_free_lcu(lcu);
++		lcu = NULL;
++	} else {
++		if (was_pending)
++			_schedule_lcu_update(lcu, NULL);
++		spin_unlock(&lcu->lock);
++	}
++	server = _find_server(&private->uid);
++	if (server && list_empty(&server->lculist)) {
++		list_del(&server->server);
++		_free_server(server);
++	}
++	spin_unlock_irqrestore(&aliastree.lock, flags);
++}
++
++/*
++ * This function assumes that the unit address configuration stored
++ * in the lcu is up to date and will update the device uid before
++ * adding it to a pav group.
++ */
++static int _add_device_to_lcu(struct alias_lcu *lcu,
++			      struct dasd_device *device)
++{
++
++	struct dasd_eckd_private *private;
++	struct alias_pav_group *group;
++	struct dasd_uid *uid;
++
++	private = (struct dasd_eckd_private *) device->private;
++	uid = &private->uid;
++	uid->type = lcu->uac->unit[uid->real_unit_addr].ua_type;
++	uid->base_unit_addr = lcu->uac->unit[uid->real_unit_addr].base_ua;
++	dasd_set_uid(device->cdev, &private->uid);
++
++	/* if we have no PAV anyway, we don't need to bother with PAV groups */
++	if (lcu->pav == NO_PAV) {
++		list_move(&device->alias_list, &lcu->active_devices);
++		return 0;
++	}
++
++	group = _find_group(lcu, uid);
++	if (!group) {
++		group = kzalloc(sizeof(*group), GFP_ATOMIC);
++		if (!group)
++			return -ENOMEM;
++		memcpy(group->uid.vendor, uid->vendor, sizeof(uid->vendor));
++		memcpy(group->uid.serial, uid->serial, sizeof(uid->serial));
++		group->uid.ssid = uid->ssid;
++		if (uid->type == UA_BASE_DEVICE)
++			group->uid.base_unit_addr = uid->real_unit_addr;
++		else
++			group->uid.base_unit_addr = uid->base_unit_addr;
++		INIT_LIST_HEAD(&group->group);
++		INIT_LIST_HEAD(&group->baselist);
++		INIT_LIST_HEAD(&group->aliaslist);
++		list_add(&group->group, &lcu->grouplist);
++	}
++	if (uid->type == UA_BASE_DEVICE)
++		list_move(&device->alias_list, &group->baselist);
++	else
++		list_move(&device->alias_list, &group->aliaslist);
++	private->pavgroup = group;
++	return 0;
++};
++
++static void _remove_device_from_lcu(struct alias_lcu *lcu,
++				    struct dasd_device *device)
++{
++	struct dasd_eckd_private *private;
++	struct alias_pav_group *group;
++
++	private = (struct dasd_eckd_private *) device->private;
++	list_move(&device->alias_list, &lcu->inactive_devices);
++	group = private->pavgroup;
++	if (!group)
++		return;
++	private->pavgroup = NULL;
++	if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
++		list_del(&group->group);
++		kfree(group);
++		return;
++	}
++	if (group->next == device)
++		group->next = NULL;
++};
++
++static int read_unit_address_configuration(struct dasd_device *device,
++					   struct alias_lcu *lcu)
++{
++	struct dasd_psf_prssd_data *prssdp;
++	struct dasd_ccw_req *cqr;
++	struct ccw1 *ccw;
++	int rc;
++	unsigned long flags;
++
++	cqr = dasd_kmalloc_request("ECKD",
++				   1 /* PSF */	+ 1 /* RSSD */ ,
++				   (sizeof(struct dasd_psf_prssd_data)),
++				   device);
++	if (IS_ERR(cqr))
++		return PTR_ERR(cqr);
++	cqr->startdev = device;
++	cqr->memdev = device;
++	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
++	cqr->retries = 10;
++	cqr->expires = 20 * HZ;
++
++	/* Prepare for Read Subsystem Data */
++	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
++	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
++	prssdp->order = PSF_ORDER_PRSSD;
++	prssdp->suborder = 0x0e;	/* Read unit address configuration */
++	/* all other bytes of prssdp must be zero */
++
++	ccw = cqr->cpaddr;
++	ccw->cmd_code = DASD_ECKD_CCW_PSF;
++	ccw->count = sizeof(struct dasd_psf_prssd_data);
++	ccw->flags |= CCW_FLAG_CC;
++	ccw->cda = (__u32)(addr_t) prssdp;
++
++	/* Read Subsystem Data - feature codes */
++	memset(lcu->uac, 0, sizeof(*(lcu->uac)));
++
++	ccw++;
++	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
++	ccw->count = sizeof(*(lcu->uac));
++	ccw->cda = (__u32)(addr_t) lcu->uac;
++
++	cqr->buildclk = get_clock();
++	cqr->status = DASD_CQR_FILLED;
++
++	/* need to unset flag here to detect race with summary unit check */
++	spin_lock_irqsave(&lcu->lock, flags);
++	lcu->flags &= ~NEED_UAC_UPDATE;
++	spin_unlock_irqrestore(&lcu->lock, flags);
++
++	do {
++		rc = dasd_sleep_on(cqr);
++	} while (rc && (cqr->retries > 0));
++	if (rc) {
++		spin_lock_irqsave(&lcu->lock, flags);
++		lcu->flags |= NEED_UAC_UPDATE;
++		spin_unlock_irqrestore(&lcu->lock, flags);
++	}
++	dasd_kfree_request(cqr, cqr->memdev);
++	return rc;
++}
++
++static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
++{
++	unsigned long flags;
++	struct alias_pav_group *pavgroup, *tempgroup;
++	struct dasd_device *device, *tempdev;
++	int i, rc;
++	struct dasd_eckd_private *private;
++
++	spin_lock_irqsave(&lcu->lock, flags);
++	list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
++		list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
++					 alias_list) {
++			list_move(&device->alias_list, &lcu->active_devices);
++			private = (struct dasd_eckd_private *) device->private;
++			private->pavgroup = NULL;
++		}
++		list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
++					 alias_list) {
++			list_move(&device->alias_list, &lcu->active_devices);
++			private = (struct dasd_eckd_private *) device->private;
++			private->pavgroup = NULL;
++		}
++		list_del(&pavgroup->group);
++		kfree(pavgroup);
++	}
++	spin_unlock_irqrestore(&lcu->lock, flags);
++
++	rc = read_unit_address_configuration(refdev, lcu);
++	if (rc)
++		return rc;
++
++	spin_lock_irqsave(&lcu->lock, flags);
++	lcu->pav = NO_PAV;
++	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
++		switch (lcu->uac->unit[i].ua_type) {
++		case UA_BASE_PAV_ALIAS:
++			lcu->pav = BASE_PAV;
++			break;
++		case UA_HYPER_PAV_ALIAS:
++			lcu->pav = HYPER_PAV;
++			break;
++		}
++		if (lcu->pav != NO_PAV)
++			break;
++	}
++
++	list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
++				 alias_list) {
++		_add_device_to_lcu(lcu, device);
++	}
++	spin_unlock_irqrestore(&lcu->lock, flags);
++	return 0;
++}
++
++static void lcu_update_work(struct work_struct *work)
++{
++	struct alias_lcu *lcu;
++	struct read_uac_work_data *ruac_data;
++	struct dasd_device *device;
++	unsigned long flags;
++	int rc;
++
++	ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
++	lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
++	device = ruac_data->device;
++	rc = _lcu_update(device, lcu);
++	/*
++	 * Need to check flags again, as there could have been another
++	 * prepare_update or a new device while we were still
++	 * processing the data
++	 */
++	spin_lock_irqsave(&lcu->lock, flags);
++	if (rc || (lcu->flags & NEED_UAC_UPDATE)) {
++		DEV_MESSAGE(KERN_WARNING, device, "could not update"
++			    " alias data in lcu (rc = %d), retry later", rc);
++		schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
++	} else {
++		lcu->ruac_data.device = NULL;
++		lcu->flags &= ~UPDATE_PENDING;
++	}
++	spin_unlock_irqrestore(&lcu->lock, flags);
++}
++
++static int _schedule_lcu_update(struct alias_lcu *lcu,
++				struct dasd_device *device)
++{
++	struct dasd_device *usedev = NULL;
++	struct alias_pav_group *group;
++
++	lcu->flags |= NEED_UAC_UPDATE;
++	if (lcu->ruac_data.device) {
++		/* already scheduled or running */
++		return 0;
++	}
++	if (device && !list_empty(&device->alias_list))
++		usedev = device;
++
++	if (!usedev && !list_empty(&lcu->grouplist)) {
++		group = list_first_entry(&lcu->grouplist,
++					 struct alias_pav_group, group);
++		if (!list_empty(&group->baselist))
++			usedev = list_first_entry(&group->baselist,
++						  struct dasd_device,
++						  alias_list);
++		else if (!list_empty(&group->aliaslist))
++			usedev = list_first_entry(&group->aliaslist,
++						  struct dasd_device,
++						  alias_list);
++	}
++	if (!usedev && !list_empty(&lcu->active_devices)) {
++		usedev = list_first_entry(&lcu->active_devices,
++					  struct dasd_device, alias_list);
++	}
++	/*
++	 * if we haven't found a proper device yet, give up for now, the next
++	 * device that will be set active will trigger an lcu update
++	 */
++	if (!usedev)
++		return -EINVAL;
++	lcu->ruac_data.device = usedev;
++	schedule_delayed_work(&lcu->ruac_data.dwork, 0);
++	return 0;
++}
++
++int dasd_alias_add_device(struct dasd_device *device)
++{
++	struct dasd_eckd_private *private;
++	struct alias_lcu *lcu;
++	unsigned long flags;
++	int rc;
++
++	private = (struct dasd_eckd_private *) device->private;
++	lcu = private->lcu;
++	rc = 0;
++	spin_lock_irqsave(&lcu->lock, flags);
++	if (!(lcu->flags & UPDATE_PENDING)) {
++		rc = _add_device_to_lcu(lcu, device);
++		if (rc)
++			lcu->flags |= UPDATE_PENDING;
++	}
++	if (lcu->flags & UPDATE_PENDING) {
++		list_move(&device->alias_list, &lcu->active_devices);
++		_schedule_lcu_update(lcu, device);
++	}
++	spin_unlock_irqrestore(&lcu->lock, flags);
++	return rc;
++}
++
++int dasd_alias_remove_device(struct dasd_device *device)
++{
++	struct dasd_eckd_private *private;
++	struct alias_lcu *lcu;
++	unsigned long flags;
++
++	private = (struct dasd_eckd_private *) device->private;
++	lcu = private->lcu;
++	spin_lock_irqsave(&lcu->lock, flags);
++	_remove_device_from_lcu(lcu, device);
++	spin_unlock_irqrestore(&lcu->lock, flags);
++	return 0;
++}
++
++struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
++{
++
++	struct dasd_device *alias_device;
++	struct alias_pav_group *group;
++	struct alias_lcu *lcu;
++	struct dasd_eckd_private *private, *alias_priv;
++	unsigned long flags;
++
++	private = (struct dasd_eckd_private *) base_device->private;
++	group = private->pavgroup;
++	lcu = private->lcu;
++	if (!group || !lcu)
++		return NULL;
++	if (lcu->pav == NO_PAV ||
++	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
++		return NULL;
++
++	spin_lock_irqsave(&lcu->lock, flags);
++	alias_device = group->next;
++	if (!alias_device) {
++		if (list_empty(&group->aliaslist)) {
++			spin_unlock_irqrestore(&lcu->lock, flags);
++			return NULL;
++		} else {
++			alias_device = list_first_entry(&group->aliaslist,
++							struct dasd_device,
++							alias_list);
++		}
++	}
++	if (list_is_last(&alias_device->alias_list, &group->aliaslist))
++		group->next = list_first_entry(&group->aliaslist,
++					       struct dasd_device, alias_list);
++	else
++		group->next = list_first_entry(&alias_device->alias_list,
++					       struct dasd_device, alias_list);
++	spin_unlock_irqrestore(&lcu->lock, flags);
++	alias_priv = (struct dasd_eckd_private *) alias_device->private;
++	if ((alias_priv->count < private->count) && !alias_device->stopped)
++		return alias_device;
++	else
++		return NULL;
++}
++
++/*
++ * Summary unit check handling depends on the way alias devices
++ * are handled so it is done here rather then in dasd_eckd.c
++ */
++static int reset_summary_unit_check(struct alias_lcu *lcu,
++				    struct dasd_device *device,
++				    char reason)
++{
++	struct dasd_ccw_req *cqr;
++	int rc = 0;
++
++	cqr = lcu->rsu_cqr;
++	strncpy((char *) &cqr->magic, "ECKD", 4);
++	ASCEBC((char *) &cqr->magic, 4);
++	cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RSCK;
++	cqr->cpaddr->flags = 0 ;
++	cqr->cpaddr->count = 16;
++	cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
++	((char *)cqr->data)[0] = reason;
++
++	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
++	cqr->retries = 255;	/* set retry counter to enable basic ERP */
++	cqr->startdev = device;
++	cqr->memdev = device;
++	cqr->block = NULL;
++	cqr->expires = 5 * HZ;
++	cqr->buildclk = get_clock();
++	cqr->status = DASD_CQR_FILLED;
++
++	rc = dasd_sleep_on_immediatly(cqr);
++	return rc;
++}
++
++static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
++{
++	struct alias_pav_group *pavgroup;
++	struct dasd_device *device;
++	struct dasd_eckd_private *private;
++
++	/* active and inactive list can contain alias as well as base devices */
++	list_for_each_entry(device, &lcu->active_devices, alias_list) {
++		private = (struct dasd_eckd_private *) device->private;
++		if (private->uid.type != UA_BASE_DEVICE)
++			continue;
++		dasd_schedule_block_bh(device->block);
++		dasd_schedule_device_bh(device);
++	}
++	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
++		private = (struct dasd_eckd_private *) device->private;
++		if (private->uid.type != UA_BASE_DEVICE)
++			continue;
++		dasd_schedule_block_bh(device->block);
++		dasd_schedule_device_bh(device);
++	}
++	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
++		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
++			dasd_schedule_block_bh(device->block);
++			dasd_schedule_device_bh(device);
++		}
++	}
++}
++
++static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
++{
++	struct alias_pav_group *pavgroup;
++	struct dasd_device *device, *temp;
++	struct dasd_eckd_private *private;
++	int rc;
++	unsigned long flags;
++	LIST_HEAD(active);
++
++	/*
++	 * Problem here ist that dasd_flush_device_queue may wait
++	 * for termination of a request to complete. We can't keep
++	 * the lcu lock during that time, so we must assume that
++	 * the lists may have changed.
++	 * Idea: first gather all active alias devices in a separate list,
++	 * then flush the first element of this list unlocked, and afterwards
++	 * check if it is still on the list before moving it to the
++	 * active_devices list.
++	 */
++
++	spin_lock_irqsave(&lcu->lock, flags);
++	list_for_each_entry_safe(device, temp, &lcu->active_devices,
++				 alias_list) {
++		private = (struct dasd_eckd_private *) device->private;
++		if (private->uid.type == UA_BASE_DEVICE)
++			continue;
++		list_move(&device->alias_list, &active);
++	}
++
++	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
++		list_splice_init(&pavgroup->aliaslist, &active);
++	}
++	while (!list_empty(&active)) {
++		device = list_first_entry(&active, struct dasd_device,
++					  alias_list);
++		spin_unlock_irqrestore(&lcu->lock, flags);
++		rc = dasd_flush_device_queue(device);
++		spin_lock_irqsave(&lcu->lock, flags);
++		/*
++		 * only move device around if it wasn't moved away while we
++		 * were waiting for the flush
++		 */
++		if (device == list_first_entry(&active,
++					       struct dasd_device, alias_list))
++			list_move(&device->alias_list, &lcu->active_devices);
++	}
++	spin_unlock_irqrestore(&lcu->lock, flags);
++}
++
++/*
++ * This function is called in interrupt context, so the
++ * cdev lock for device is already locked!
++ */
++static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
++				     struct dasd_device *device)
++{
++	struct alias_pav_group *pavgroup;
++	struct dasd_device *pos;
++
++	list_for_each_entry(pos, &lcu->active_devices, alias_list) {
++		if (pos != device)
++			spin_lock(get_ccwdev_lock(pos->cdev));
++		pos->stopped |= DASD_STOPPED_SU;
++		if (pos != device)
++			spin_unlock(get_ccwdev_lock(pos->cdev));
++	}
++	list_for_each_entry(pos, &lcu->inactive_devices, alias_list) {
++		if (pos != device)
++			spin_lock(get_ccwdev_lock(pos->cdev));
++		pos->stopped |= DASD_STOPPED_SU;
++		if (pos != device)
++			spin_unlock(get_ccwdev_lock(pos->cdev));
++	}
++	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
++		list_for_each_entry(pos, &pavgroup->baselist, alias_list) {
++			if (pos != device)
++				spin_lock(get_ccwdev_lock(pos->cdev));
++			pos->stopped |= DASD_STOPPED_SU;
++			if (pos != device)
++				spin_unlock(get_ccwdev_lock(pos->cdev));
++		}
++		list_for_each_entry(pos, &pavgroup->aliaslist, alias_list) {
++			if (pos != device)
++				spin_lock(get_ccwdev_lock(pos->cdev));
++			pos->stopped |= DASD_STOPPED_SU;
++			if (pos != device)
++				spin_unlock(get_ccwdev_lock(pos->cdev));
++		}
++	}
++}
++
++static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
++{
++	struct alias_pav_group *pavgroup;
++	struct dasd_device *device;
++	unsigned long flags;
++
++	list_for_each_entry(device, &lcu->active_devices, alias_list) {
++		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
++		device->stopped &= ~DASD_STOPPED_SU;
++		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
++	}
++
++	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
++		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
++		device->stopped &= ~DASD_STOPPED_SU;
++		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
++	}
++
++	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
++		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
++			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
++			device->stopped &= ~DASD_STOPPED_SU;
++			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
++					       flags);
++		}
++		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
++			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
++			device->stopped &= ~DASD_STOPPED_SU;
++			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
++					       flags);
++		}
++	}
++}
++
++static void summary_unit_check_handling_work(struct work_struct *work)
++{
++	struct alias_lcu *lcu;
++	struct summary_unit_check_work_data *suc_data;
++	unsigned long flags;
++	struct dasd_device *device;
++
++	suc_data = container_of(work, struct summary_unit_check_work_data,
++				worker);
++	lcu = container_of(suc_data, struct alias_lcu, suc_data);
++	device = suc_data->device;
++
++	/* 1. flush alias devices */
++	flush_all_alias_devices_on_lcu(lcu);
++
++	/* 2. reset summary unit check */
++	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
++	device->stopped &= ~(DASD_STOPPED_SU | DASD_STOPPED_PENDING);
++	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
++	reset_summary_unit_check(lcu, device, suc_data->reason);
++
++	spin_lock_irqsave(&lcu->lock, flags);
++	_unstop_all_devices_on_lcu(lcu);
++	_restart_all_base_devices_on_lcu(lcu);
++	/* 3. read new alias configuration */
++	_schedule_lcu_update(lcu, device);
++	lcu->suc_data.device = NULL;
++	spin_unlock_irqrestore(&lcu->lock, flags);
++}
++
++/*
++ * note: this will be called from int handler context (cdev locked)
++ */
++void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
++					  struct irb *irb)
++{
++	struct alias_lcu *lcu;
++	char reason;
++	struct dasd_eckd_private *private;
++
++	private = (struct dasd_eckd_private *) device->private;
++
++	reason = irb->ecw[8];
++	DEV_MESSAGE(KERN_WARNING, device, "%s %x",
++		    "eckd handle summary unit check: reason", reason);
++
++	lcu = private->lcu;
++	if (!lcu) {
++		DEV_MESSAGE(KERN_WARNING, device, "%s",
++			    "device not ready to handle summary"
++			    " unit check (no lcu structure)");
++		return;
++	}
++	spin_lock(&lcu->lock);
++	_stop_all_devices_on_lcu(lcu, device);
++	/* prepare for lcu_update */
++	private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
++	/* If this device is about to be removed just return and wait for
++	 * the next interrupt on a different device
++	 */
++	if (list_empty(&device->alias_list)) {
++		DEV_MESSAGE(KERN_WARNING, device, "%s",
++			    "device is in offline processing,"
++			    " don't do summary unit check handling");
++		spin_unlock(&lcu->lock);
++		return;
++	}
++	if (lcu->suc_data.device) {
++		/* already scheduled or running */
++		DEV_MESSAGE(KERN_WARNING, device, "%s",
++			    "previous instance of summary unit check worker"
++			    " still pending");
++		spin_unlock(&lcu->lock);
++		return ;
++	}
++	lcu->suc_data.reason = reason;
++	lcu->suc_data.device = device;
++	spin_unlock(&lcu->lock);
++	schedule_work(&lcu->suc_data.worker);
++};
+diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
+index 0c67258..f4fb402 100644
+--- a/drivers/s390/block/dasd_devmap.c
++++ b/drivers/s390/block/dasd_devmap.c
+@@ -49,22 +49,6 @@ struct dasd_devmap {
+ };
+ 
+ /*
+- * dasd_server_ssid_map contains a globally unique storage server subsystem ID.
+- * dasd_server_ssid_list contains the list of all subsystem IDs accessed by
+- * the DASD device driver.
+- */
+-struct dasd_server_ssid_map {
+-	struct list_head list;
+-	struct system_id {
+-		char vendor[4];
+-		char serial[15];
+-		__u16 ssid;
+-	} sid;
+-};
+-
+-static struct list_head dasd_server_ssid_list;
+-
+-/*
+  * Parameter parsing functions for dasd= parameter. The syntax is:
+  *   <devno>		: (0x)?[0-9a-fA-F]+
+  *   <busid>		: [0-0a-f]\.[0-9a-f]\.(0x)?[0-9a-fA-F]+
+@@ -721,8 +705,9 @@ dasd_ro_store(struct device *dev, struct device_attribute *attr,
+ 		devmap->features &= ~DASD_FEATURE_READONLY;
+ 	if (devmap->device)
+ 		devmap->device->features = devmap->features;
+-	if (devmap->device && devmap->device->gdp)
+-		set_disk_ro(devmap->device->gdp, val);
++	if (devmap->device && devmap->device->block
++	    && devmap->device->block->gdp)
++		set_disk_ro(devmap->device->block->gdp, val);
+ 	spin_unlock(&dasd_devmap_lock);
+ 	return count;
+ }
+@@ -893,12 +878,16 @@ dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf)
+ 
+ 	devmap = dasd_find_busid(dev->bus_id);
+ 	spin_lock(&dasd_devmap_lock);
+-	if (!IS_ERR(devmap))
+-		alias = devmap->uid.alias;
++	if (IS_ERR(devmap) || strlen(devmap->uid.vendor) == 0) {
++		spin_unlock(&dasd_devmap_lock);
++		return sprintf(buf, "0\n");
++	}
++	if (devmap->uid.type == UA_BASE_PAV_ALIAS ||
++	    devmap->uid.type == UA_HYPER_PAV_ALIAS)
++		alias = 1;
+ 	else
+ 		alias = 0;
+ 	spin_unlock(&dasd_devmap_lock);
+-
+ 	return sprintf(buf, alias ? "1\n" : "0\n");
+ }
+ 
+@@ -930,19 +919,36 @@ static ssize_t
+ dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+ 	struct dasd_devmap *devmap;
+-	char uid[UID_STRLEN];
++	char uid_string[UID_STRLEN];
++	char ua_string[3];
++	struct dasd_uid *uid;
+ 
+ 	devmap = dasd_find_busid(dev->bus_id);
+ 	spin_lock(&dasd_devmap_lock);
+-	if (!IS_ERR(devmap) && strlen(devmap->uid.vendor) > 0)
+-		snprintf(uid, sizeof(uid), "%s.%s.%04x.%02x",
+-			 devmap->uid.vendor, devmap->uid.serial,
+-			 devmap->uid.ssid, devmap->uid.unit_addr);
+-	else
+-		uid[0] = 0;
++	if (IS_ERR(devmap) || strlen(devmap->uid.vendor) == 0) {
++		spin_unlock(&dasd_devmap_lock);
++		return sprintf(buf, "\n");
++	}
++	uid = &devmap->uid;
++	switch (uid->type) {
++	case UA_BASE_DEVICE:
++		sprintf(ua_string, "%02x", uid->real_unit_addr);
++		break;
++	case UA_BASE_PAV_ALIAS:
++		sprintf(ua_string, "%02x", uid->base_unit_addr);
++		break;
++	case UA_HYPER_PAV_ALIAS:
++		sprintf(ua_string, "xx");
++		break;
++	default:
++		/* should not happen, treat like base device */
++		sprintf(ua_string, "%02x", uid->real_unit_addr);
++		break;
++	}
++	snprintf(uid_string, sizeof(uid_string), "%s.%s.%04x.%s",
++		 uid->vendor, uid->serial, uid->ssid, ua_string);
+ 	spin_unlock(&dasd_devmap_lock);
+-
+-	return snprintf(buf, PAGE_SIZE, "%s\n", uid);
++	return snprintf(buf, PAGE_SIZE, "%s\n", uid_string);
+ }
+ 
+ static DEVICE_ATTR(uid, 0444, dasd_uid_show, NULL);
+@@ -1040,39 +1046,16 @@ int
+ dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid)
+ {
+ 	struct dasd_devmap *devmap;
+-	struct dasd_server_ssid_map *srv, *tmp;
+ 
+ 	devmap = dasd_find_busid(cdev->dev.bus_id);
+ 	if (IS_ERR(devmap))
+ 		return PTR_ERR(devmap);
+ 
+-	/* generate entry for server_ssid_map */
+-	srv = (struct dasd_server_ssid_map *)
+-		kzalloc(sizeof(struct dasd_server_ssid_map), GFP_KERNEL);
+-	if (!srv)
+-		return -ENOMEM;
+-	strncpy(srv->sid.vendor, uid->vendor, sizeof(srv->sid.vendor) - 1);
+-	strncpy(srv->sid.serial, uid->serial, sizeof(srv->sid.serial) - 1);
+-	srv->sid.ssid = uid->ssid;
+-
+-	/* server is already contained ? */
+ 	spin_lock(&dasd_devmap_lock);
+ 	devmap->uid = *uid;
+-	list_for_each_entry(tmp, &dasd_server_ssid_list, list) {
+-		if (!memcmp(&srv->sid, &tmp->sid,
+-			    sizeof(struct system_id))) {
+-			kfree(srv);
+-			srv = NULL;
+-			break;
+-		}
+-	}
+-
+-	/* add servermap to serverlist */
+-	if (srv)
+-		list_add(&srv->list, &dasd_server_ssid_list);
+ 	spin_unlock(&dasd_devmap_lock);
+ 
+-	return (srv ? 1 : 0);
++	return 0;
+ }
+ EXPORT_SYMBOL_GPL(dasd_set_uid);
+ 
+@@ -1138,9 +1121,6 @@ dasd_devmap_init(void)
+ 	dasd_max_devindex = 0;
+ 	for (i = 0; i < 256; i++)
+ 		INIT_LIST_HEAD(&dasd_hashlists[i]);
+-
+-	/* Initialize servermap structure. */
+-	INIT_LIST_HEAD(&dasd_server_ssid_list);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
+index 571320a..d91df38 100644
+--- a/drivers/s390/block/dasd_diag.c
++++ b/drivers/s390/block/dasd_diag.c
+@@ -142,7 +142,7 @@ dasd_diag_erp(struct dasd_device *device)
+ 	int rc;
+ 
+ 	mdsk_term_io(device);
+-	rc = mdsk_init_io(device, device->bp_block, 0, NULL);
++	rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
+ 	if (rc)
+ 		DEV_MESSAGE(KERN_WARNING, device, "DIAG ERP unsuccessful, "
+ 			    "rc=%d", rc);
+@@ -158,11 +158,11 @@ dasd_start_diag(struct dasd_ccw_req * cqr)
+ 	struct dasd_diag_req *dreq;
+ 	int rc;
+ 
+-	device = cqr->device;
++	device = cqr->startdev;
+ 	if (cqr->retries < 0) {
+ 		DEV_MESSAGE(KERN_WARNING, device, "DIAG start_IO: request %p "
+ 			    "- no retry left)", cqr);
+-		cqr->status = DASD_CQR_FAILED;
++		cqr->status = DASD_CQR_ERROR;
+ 		return -EIO;
+ 	}
+ 	private = (struct dasd_diag_private *) device->private;
+@@ -184,7 +184,7 @@ dasd_start_diag(struct dasd_ccw_req * cqr)
+ 	switch (rc) {
+ 	case 0: /* Synchronous I/O finished successfully */
+ 		cqr->stopclk = get_clock();
+-		cqr->status = DASD_CQR_DONE;
++		cqr->status = DASD_CQR_SUCCESS;
+ 		/* Indicate to calling function that only a dasd_schedule_bh()
+ 		   and no timer is needed */
+                 rc = -EACCES;
+@@ -209,12 +209,12 @@ dasd_diag_term_IO(struct dasd_ccw_req * cqr)
+ {
+ 	struct dasd_device *device;
+ 
+-	device = cqr->device;
++	device = cqr->startdev;
+ 	mdsk_term_io(device);
+-	mdsk_init_io(device, device->bp_block, 0, NULL);
+-	cqr->status = DASD_CQR_CLEAR;
++	mdsk_init_io(device, device->block->bp_block, 0, NULL);
++	cqr->status = DASD_CQR_CLEAR_PENDING;
+ 	cqr->stopclk = get_clock();
+-	dasd_schedule_bh(device);
++	dasd_schedule_device_bh(device);
+ 	return 0;
+ }
+ 
+@@ -247,7 +247,7 @@ dasd_ext_handler(__u16 code)
+ 		return;
+ 	}
+ 	cqr = (struct dasd_ccw_req *) ip;
+-	device = (struct dasd_device *) cqr->device;
++	device = (struct dasd_device *) cqr->startdev;
+ 	if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
+ 		DEV_MESSAGE(KERN_WARNING, device,
+ 			    " magic number of dasd_ccw_req 0x%08X doesn't"
+@@ -260,10 +260,10 @@ dasd_ext_handler(__u16 code)
+ 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ 
+ 	/* Check for a pending clear operation */
+-	if (cqr->status == DASD_CQR_CLEAR) {
+-		cqr->status = DASD_CQR_QUEUED;
+-		dasd_clear_timer(device);
+-		dasd_schedule_bh(device);
++	if (cqr->status == DASD_CQR_CLEAR_PENDING) {
++		cqr->status = DASD_CQR_CLEARED;
++		dasd_device_clear_timer(device);
++		dasd_schedule_device_bh(device);
+ 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+ 		return;
+ 	}
+@@ -272,11 +272,11 @@ dasd_ext_handler(__u16 code)
+ 
+ 	expires = 0;
+ 	if (status == 0) {
+-		cqr->status = DASD_CQR_DONE;
++		cqr->status = DASD_CQR_SUCCESS;
+ 		/* Start first request on queue if possible -> fast_io. */
+ 		if (!list_empty(&device->ccw_queue)) {
+ 			next = list_entry(device->ccw_queue.next,
+-					  struct dasd_ccw_req, list);
++					  struct dasd_ccw_req, devlist);
+ 			if (next->status == DASD_CQR_QUEUED) {
+ 				rc = dasd_start_diag(next);
+ 				if (rc == 0)
+@@ -296,10 +296,10 @@ dasd_ext_handler(__u16 code)
+ 	}
+ 
+ 	if (expires != 0)
+-		dasd_set_timer(device, expires);
++		dasd_device_set_timer(device, expires);
+ 	else
+-		dasd_clear_timer(device);
+-	dasd_schedule_bh(device);
++		dasd_device_clear_timer(device);
++	dasd_schedule_device_bh(device);
+ 
+ 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+ }
+@@ -309,6 +309,7 @@ dasd_ext_handler(__u16 code)
+ static int
+ dasd_diag_check_device(struct dasd_device *device)
+ {
++	struct dasd_block *block;
+ 	struct dasd_diag_private *private;
+ 	struct dasd_diag_characteristics *rdc_data;
+ 	struct dasd_diag_bio bio;
+@@ -328,6 +329,16 @@ dasd_diag_check_device(struct dasd_device *device)
+ 		ccw_device_get_id(device->cdev, &private->dev_id);
+ 		device->private = (void *) private;
+ 	}
++	block = dasd_alloc_block();
++	if (IS_ERR(block)) {
++		DEV_MESSAGE(KERN_WARNING, device, "%s",
++			    "could not allocate dasd block structure");
++		kfree(device->private);
++		return PTR_ERR(block);
++	}
++	device->block = block;
++	block->base = device;
++
+ 	/* Read Device Characteristics */
+ 	rdc_data = (void *) &(private->rdc_data);
+ 	rdc_data->dev_nr = private->dev_id.devno;
+@@ -409,14 +420,14 @@ dasd_diag_check_device(struct dasd_device *device)
+ 		  sizeof(DASD_DIAG_CMS1)) == 0) {
+ 		/* get formatted blocksize from label block */
+ 		bsize = (unsigned int) label->block_size;
+-		device->blocks = (unsigned long) label->block_count;
++		block->blocks = (unsigned long) label->block_count;
+ 	} else
+-		device->blocks = end_block;
+-	device->bp_block = bsize;
+-	device->s2b_shift = 0;	/* bits to shift 512 to get a block */
++		block->blocks = end_block;
++	block->bp_block = bsize;
++	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
+ 	for (sb = 512; sb < bsize; sb = sb << 1)
+-		device->s2b_shift++;
+-	rc = mdsk_init_io(device, device->bp_block, 0, NULL);
++		block->s2b_shift++;
++	rc = mdsk_init_io(device, block->bp_block, 0, NULL);
+ 	if (rc) {
+ 		DEV_MESSAGE(KERN_WARNING, device, "DIAG initialization "
+ 			"failed (rc=%d)", rc);
+@@ -424,9 +435,9 @@ dasd_diag_check_device(struct dasd_device *device)
+ 	} else {
+ 		DEV_MESSAGE(KERN_INFO, device,
+ 			    "(%ld B/blk): %ldkB",
+-			    (unsigned long) device->bp_block,
+-			    (unsigned long) (device->blocks <<
+-				device->s2b_shift) >> 1);
++			    (unsigned long) block->bp_block,
++			    (unsigned long) (block->blocks <<
++				block->s2b_shift) >> 1);
+ 	}
+ out:
+ 	free_page((long) label);
+@@ -436,22 +447,16 @@ out:
+ /* Fill in virtual disk geometry for device. Return zero on success, non-zero
+  * otherwise. */
+ static int
+-dasd_diag_fill_geometry(struct dasd_device *device, struct hd_geometry *geo)
++dasd_diag_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
+ {
+-	if (dasd_check_blocksize(device->bp_block) != 0)
++	if (dasd_check_blocksize(block->bp_block) != 0)
+ 		return -EINVAL;
+-	geo->cylinders = (device->blocks << device->s2b_shift) >> 10;
++	geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
+ 	geo->heads = 16;
+-	geo->sectors = 128 >> device->s2b_shift;
++	geo->sectors = 128 >> block->s2b_shift;
+ 	return 0;
+ }
+ 
+-static dasd_era_t
+-dasd_diag_examine_error(struct dasd_ccw_req * cqr, struct irb * stat)
+-{
+-	return dasd_era_fatal;
+-}
+-
+ static dasd_erp_fn_t
+ dasd_diag_erp_action(struct dasd_ccw_req * cqr)
+ {
+@@ -466,8 +471,9 @@ dasd_diag_erp_postaction(struct dasd_ccw_req * cqr)
+ 
+ /* Create DASD request from block device request. Return pointer to new
+  * request on success, ERR_PTR otherwise. */
+-static struct dasd_ccw_req *
+-dasd_diag_build_cp(struct dasd_device * device, struct request *req)
++static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
++					       struct dasd_block *block,
++					       struct request *req)
+ {
+ 	struct dasd_ccw_req *cqr;
+ 	struct dasd_diag_req *dreq;
+@@ -486,17 +492,17 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
+ 		rw_cmd = MDSK_WRITE_REQ;
+ 	else
+ 		return ERR_PTR(-EINVAL);
+-	blksize = device->bp_block;
++	blksize = block->bp_block;
+ 	/* Calculate record id of first and last block. */
+-	first_rec = req->sector >> device->s2b_shift;
+-	last_rec = (req->sector + req->nr_sectors - 1) >> device->s2b_shift;
++	first_rec = req->sector >> block->s2b_shift;
++	last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
+ 	/* Check struct bio and count the number of blocks for the request. */
+ 	count = 0;
+ 	rq_for_each_segment(bv, req, iter) {
+ 		if (bv->bv_len & (blksize - 1))
+ 			/* Fba can only do full blocks. */
+ 			return ERR_PTR(-EINVAL);
+-		count += bv->bv_len >> (device->s2b_shift + 9);
++		count += bv->bv_len >> (block->s2b_shift + 9);
+ 	}
+ 	/* Paranoia. */
+ 	if (count != last_rec - first_rec + 1)
+@@ -505,7 +511,7 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
+ 	datasize = sizeof(struct dasd_diag_req) +
+ 		count*sizeof(struct dasd_diag_bio);
+ 	cqr = dasd_smalloc_request(dasd_diag_discipline.name, 0,
+-				   datasize, device);
++				   datasize, memdev);
+ 	if (IS_ERR(cqr))
+ 		return cqr;
+ 
+@@ -529,7 +535,9 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
+ 	cqr->buildclk = get_clock();
+ 	if (req->cmd_flags & REQ_FAILFAST)
+ 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+-	cqr->device = device;
++	cqr->startdev = memdev;
++	cqr->memdev = memdev;
++	cqr->block = block;
+ 	cqr->expires = DIAG_TIMEOUT;
+ 	cqr->status = DASD_CQR_FILLED;
+ 	return cqr;
+@@ -543,10 +551,15 @@ dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req)
+ 	int status;
+ 
+ 	status = cqr->status == DASD_CQR_DONE;
+-	dasd_sfree_request(cqr, cqr->device);
++	dasd_sfree_request(cqr, cqr->memdev);
+ 	return status;
+ }
+ 
++static void dasd_diag_handle_terminated_request(struct dasd_ccw_req *cqr)
++{
++	cqr->status = DASD_CQR_FILLED;
++};
++
+ /* Fill in IOCTL data for device. */
+ static int
+ dasd_diag_fill_info(struct dasd_device * device,
+@@ -583,7 +596,7 @@ static struct dasd_discipline dasd_diag_discipline = {
+ 	.fill_geometry = dasd_diag_fill_geometry,
+ 	.start_IO = dasd_start_diag,
+ 	.term_IO = dasd_diag_term_IO,
+-	.examine_error = dasd_diag_examine_error,
++	.handle_terminated_request = dasd_diag_handle_terminated_request,
+ 	.erp_action = dasd_diag_erp_action,
+ 	.erp_postaction = dasd_diag_erp_postaction,
+ 	.build_cp = dasd_diag_build_cp,
+diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
+index 44adf84..61f1693 100644
+--- a/drivers/s390/block/dasd_eckd.c
++++ b/drivers/s390/block/dasd_eckd.c
+@@ -52,16 +52,6 @@ MODULE_LICENSE("GPL");
+ 
+ static struct dasd_discipline dasd_eckd_discipline;
+ 
+-struct dasd_eckd_private {
+-	struct dasd_eckd_characteristics rdc_data;
+-	struct dasd_eckd_confdata conf_data;
+-	struct dasd_eckd_path path_data;
+-	struct eckd_count count_area[5];
+-	int init_cqr_status;
+-	int uses_cdl;
+-	struct attrib_data_t attrib;	/* e.g. cache operations */
+-};
+-
+ /* The ccw bus type uses this table to find devices that it sends to
+  * dasd_eckd_probe */
+ static struct ccw_device_id dasd_eckd_ids[] = {
+@@ -188,7 +178,7 @@ check_XRC (struct ccw1         *de_ccw,
+ 	if (rc == -ENOSYS || rc == -EACCES)
+ 		rc = 0;
+ 
+-	de_ccw->count = sizeof (struct DE_eckd_data);
++	de_ccw->count = sizeof(struct DE_eckd_data);
+ 	de_ccw->flags |= CCW_FLAG_SLI;
+ 	return rc;
+ }
+@@ -208,7 +198,7 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
+ 	ccw->count = 16;
+ 	ccw->cda = (__u32) __pa(data);
+ 
+-	memset(data, 0, sizeof (struct DE_eckd_data));
++	memset(data, 0, sizeof(struct DE_eckd_data));
+ 	switch (cmd) {
+ 	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
+ 	case DASD_ECKD_CCW_READ_RECORD_ZERO:
+@@ -280,6 +270,132 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
+ 	return rc;
+ }
+ 
++static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
++			       struct dasd_device  *device)
++{
++	struct dasd_eckd_private *private;
++	int rc;
++
++	private = (struct dasd_eckd_private *) device->private;
++	if (!private->rdc_data.facilities.XRC_supported)
++		return 0;
++
++	/* switch on System Time Stamp - needed for XRC Support */
++	pfxdata->define_extend.ga_extended |= 0x08; /* 'Time Stamp Valid'   */
++	pfxdata->define_extend.ga_extended |= 0x02; /* 'Extended Parameter' */
++	pfxdata->validity.time_stamp = 1;	    /* 'Time Stamp Valid'   */
++
++	rc = get_sync_clock(&pfxdata->define_extend.ep_sys_time);
++	/* Ignore return code if sync clock is switched off. */
++	if (rc == -ENOSYS || rc == -EACCES)
++		rc = 0;
++	return rc;
++}
++
++static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata, int trk,
++		  int totrk, int cmd, struct dasd_device *basedev,
++		  struct dasd_device *startdev)
++{
++	struct dasd_eckd_private *basepriv, *startpriv;
++	struct DE_eckd_data *data;
++	struct ch_t geo, beg, end;
++	int rc = 0;
++
++	basepriv = (struct dasd_eckd_private *) basedev->private;
++	startpriv = (struct dasd_eckd_private *) startdev->private;
++	data = &pfxdata->define_extend;
++
++	ccw->cmd_code = DASD_ECKD_CCW_PFX;
++	ccw->flags = 0;
++	ccw->count = sizeof(*pfxdata);
++	ccw->cda = (__u32) __pa(pfxdata);
++
++	memset(pfxdata, 0, sizeof(*pfxdata));
++	/* prefix data */
++	pfxdata->format = 0;
++	pfxdata->base_address = basepriv->conf_data.ned1.unit_addr;
++	pfxdata->base_lss = basepriv->conf_data.ned1.ID;
++	pfxdata->validity.define_extend = 1;
++
++	/* private uid is kept up to date, conf_data may be outdated */
++	if (startpriv->uid.type != UA_BASE_DEVICE) {
++		pfxdata->validity.verify_base = 1;
++		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
++			pfxdata->validity.hyper_pav = 1;
++	}
++
++	/* define extend data (mostly)*/
++	switch (cmd) {
++	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
++	case DASD_ECKD_CCW_READ_RECORD_ZERO:
++	case DASD_ECKD_CCW_READ:
++	case DASD_ECKD_CCW_READ_MT:
++	case DASD_ECKD_CCW_READ_CKD:
++	case DASD_ECKD_CCW_READ_CKD_MT:
++	case DASD_ECKD_CCW_READ_KD:
++	case DASD_ECKD_CCW_READ_KD_MT:
++	case DASD_ECKD_CCW_READ_COUNT:
++		data->mask.perm = 0x1;
++		data->attributes.operation = basepriv->attrib.operation;
++		break;
++	case DASD_ECKD_CCW_WRITE:
++	case DASD_ECKD_CCW_WRITE_MT:
++	case DASD_ECKD_CCW_WRITE_KD:
++	case DASD_ECKD_CCW_WRITE_KD_MT:
++		data->mask.perm = 0x02;
++		data->attributes.operation = basepriv->attrib.operation;
++		rc = check_XRC_on_prefix(pfxdata, basedev);
++		break;
++	case DASD_ECKD_CCW_WRITE_CKD:
++	case DASD_ECKD_CCW_WRITE_CKD_MT:
++		data->attributes.operation = DASD_BYPASS_CACHE;
++		rc = check_XRC_on_prefix(pfxdata, basedev);
++		break;
++	case DASD_ECKD_CCW_ERASE:
++	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
++	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
++		data->mask.perm = 0x3;
++		data->mask.auth = 0x1;
++		data->attributes.operation = DASD_BYPASS_CACHE;
++		rc = check_XRC_on_prefix(pfxdata, basedev);
++		break;
++	default:
++		DEV_MESSAGE(KERN_ERR, basedev, "unknown opcode 0x%x", cmd);
++		break;
++	}
++
++	data->attributes.mode = 0x3;	/* ECKD */
++
++	if ((basepriv->rdc_data.cu_type == 0x2105 ||
++	     basepriv->rdc_data.cu_type == 0x2107 ||
++	     basepriv->rdc_data.cu_type == 0x1750)
++	    && !(basepriv->uses_cdl && trk < 2))
++		data->ga_extended |= 0x40; /* Regular Data Format Mode */
++
++	geo.cyl = basepriv->rdc_data.no_cyl;
++	geo.head = basepriv->rdc_data.trk_per_cyl;
++	beg.cyl = trk / geo.head;
++	beg.head = trk % geo.head;
++	end.cyl = totrk / geo.head;
++	end.head = totrk % geo.head;
++
++	/* check for sequential prestage - enhance cylinder range */
++	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
++	    data->attributes.operation == DASD_SEQ_ACCESS) {
++
++		if (end.cyl + basepriv->attrib.nr_cyl < geo.cyl)
++			end.cyl += basepriv->attrib.nr_cyl;
++		else
++			end.cyl = (geo.cyl - 1);
++	}
++
++	data->beg_ext.cyl = beg.cyl;
++	data->beg_ext.head = beg.head;
++	data->end_ext.cyl = end.cyl;
++	data->end_ext.head = end.head;
++	return rc;
++}
++
+ static void
+ locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk,
+ 	      int rec_on_trk, int no_rec, int cmd,
+@@ -300,7 +416,7 @@ locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk,
+ 	ccw->count = 16;
+ 	ccw->cda = (__u32) __pa(data);
+ 
+-	memset(data, 0, sizeof (struct LO_eckd_data));
++	memset(data, 0, sizeof(struct LO_eckd_data));
+ 	sector = 0;
+ 	if (rec_on_trk) {
+ 		switch (private->rdc_data.dev_type) {
+@@ -441,12 +557,15 @@ dasd_eckd_generate_uid(struct dasd_device *device, struct dasd_uid *uid)
+ 	       sizeof(uid->serial) - 1);
+ 	EBCASC(uid->serial, sizeof(uid->serial) - 1);
+ 	uid->ssid = confdata->neq.subsystemID;
+-	if (confdata->ned2.sneq.flags == 0x40) {
+-		uid->alias = 1;
+-		uid->unit_addr = confdata->ned2.sneq.base_unit_addr;
+-	} else
+-		uid->unit_addr = confdata->ned1.unit_addr;
+-
++	uid->real_unit_addr = confdata->ned1.unit_addr;
++	if (confdata->ned2.sneq.flags == 0x40 &&
++	    confdata->ned2.sneq.format == 0x0001) {
++		uid->type = confdata->ned2.sneq.sua_flags;
++		if (uid->type == UA_BASE_PAV_ALIAS)
++			uid->base_unit_addr = confdata->ned2.sneq.base_unit_addr;
++	} else {
++		uid->type = UA_BASE_DEVICE;
++	}
+ 	return 0;
+ }
+ 
+@@ -470,7 +589,9 @@ static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
+ 	ccw->cda = (__u32)(addr_t)rcd_buffer;
+ 	ccw->count = ciw->count;
+ 
+-	cqr->device = device;
++	cqr->startdev = device;
++	cqr->memdev = device;
++	cqr->block = NULL;
+ 	cqr->expires = 10*HZ;
+ 	cqr->lpm = lpm;
+ 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+@@ -511,7 +632,7 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
+ 	/*
+ 	 * on success we update the user input parms
+ 	 */
+-	dasd_sfree_request(cqr, cqr->device);
++	dasd_sfree_request(cqr, cqr->memdev);
+ 	if (ret)
+ 		goto out_error;
+ 
+@@ -557,19 +678,19 @@ dasd_eckd_read_conf(struct dasd_device *device)
+ 					"data retrieved");
+ 				continue;	/* no error */
+ 			}
+-			if (conf_len != sizeof (struct dasd_eckd_confdata)) {
++			if (conf_len != sizeof(struct dasd_eckd_confdata)) {
+ 				MESSAGE(KERN_WARNING,
+ 					"sizes of configuration data mismatch"
+ 					"%d (read) vs %ld (expected)",
+ 					conf_len,
+-					sizeof (struct dasd_eckd_confdata));
++					sizeof(struct dasd_eckd_confdata));
+ 				kfree(conf_data);
+ 				continue;	/* no error */
+ 			}
+ 			/* save first valid configuration data */
+ 			if (!conf_data_saved){
+ 				memcpy(&private->conf_data, conf_data,
+-				       sizeof (struct dasd_eckd_confdata));
++				       sizeof(struct dasd_eckd_confdata));
+ 				conf_data_saved++;
+ 			}
+ 			switch (((char *)conf_data)[242] & 0x07){
+@@ -586,39 +707,104 @@ dasd_eckd_read_conf(struct dasd_device *device)
+ 	return 0;
+ }
+ 
++static int dasd_eckd_read_features(struct dasd_device *device)
++{
++	struct dasd_psf_prssd_data *prssdp;
++	struct dasd_rssd_features *features;
++	struct dasd_ccw_req *cqr;
++	struct ccw1 *ccw;
++	int rc;
++	struct dasd_eckd_private *private;
++
++	private = (struct dasd_eckd_private *) device->private;
++	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
++				   1 /* PSF */	+ 1 /* RSSD */ ,
++				   (sizeof(struct dasd_psf_prssd_data) +
++				    sizeof(struct dasd_rssd_features)),
++				   device);
++	if (IS_ERR(cqr)) {
++		DEV_MESSAGE(KERN_WARNING, device, "%s",
++			    "Could not allocate initialization request");
++		return PTR_ERR(cqr);
++	}
++	cqr->startdev = device;
++	cqr->memdev = device;
++	cqr->block = NULL;
++	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
++	cqr->retries = 5;
++	cqr->expires = 10 * HZ;
++
++	/* Prepare for Read Subsystem Data */
++	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
++	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
++	prssdp->order = PSF_ORDER_PRSSD;
++	prssdp->suborder = 0x41;	/* Read Feature Codes */
++	/* all other bytes of prssdp must be zero */
++
++	ccw = cqr->cpaddr;
++	ccw->cmd_code = DASD_ECKD_CCW_PSF;
++	ccw->count = sizeof(struct dasd_psf_prssd_data);
++	ccw->flags |= CCW_FLAG_CC;
++	ccw->cda = (__u32)(addr_t) prssdp;
++
++	/* Read Subsystem Data - feature codes */
++	features = (struct dasd_rssd_features *) (prssdp + 1);
++	memset(features, 0, sizeof(struct dasd_rssd_features));
++
++	ccw++;
++	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
++	ccw->count = sizeof(struct dasd_rssd_features);
++	ccw->cda = (__u32)(addr_t) features;
++
++	cqr->buildclk = get_clock();
++	cqr->status = DASD_CQR_FILLED;
++	rc = dasd_sleep_on(cqr);
++	if (rc == 0) {
++		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
++		features = (struct dasd_rssd_features *) (prssdp + 1);
++		memcpy(&private->features, features,
++		       sizeof(struct dasd_rssd_features));
++	}
++	dasd_sfree_request(cqr, cqr->memdev);
++	return rc;
++}
++
++
+ /*
+  * Build CP for Perform Subsystem Function - SSC.
+  */
+-static struct dasd_ccw_req *
+-dasd_eckd_build_psf_ssc(struct dasd_device *device)
++static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device)
+ {
+-       struct dasd_ccw_req *cqr;
+-       struct dasd_psf_ssc_data *psf_ssc_data;
+-       struct ccw1 *ccw;
++	struct dasd_ccw_req *cqr;
++	struct dasd_psf_ssc_data *psf_ssc_data;
++	struct ccw1 *ccw;
+ 
+-       cqr = dasd_smalloc_request("ECKD", 1 /* PSF */ ,
++	cqr = dasd_smalloc_request("ECKD", 1 /* PSF */ ,
+ 				  sizeof(struct dasd_psf_ssc_data),
+ 				  device);
+ 
+-       if (IS_ERR(cqr)) {
+-	       DEV_MESSAGE(KERN_WARNING, device, "%s",
++	if (IS_ERR(cqr)) {
++		DEV_MESSAGE(KERN_WARNING, device, "%s",
+ 			   "Could not allocate PSF-SSC request");
+-	       return cqr;
+-       }
+-       psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
+-       psf_ssc_data->order = PSF_ORDER_SSC;
+-       psf_ssc_data->suborder = 0x08;
+-
+-       ccw = cqr->cpaddr;
+-       ccw->cmd_code = DASD_ECKD_CCW_PSF;
+-       ccw->cda = (__u32)(addr_t)psf_ssc_data;
+-       ccw->count = 66;
+-
+-       cqr->device = device;
+-       cqr->expires = 10*HZ;
+-       cqr->buildclk = get_clock();
+-       cqr->status = DASD_CQR_FILLED;
+-       return cqr;
++		return cqr;
++	}
++	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
++	psf_ssc_data->order = PSF_ORDER_SSC;
++	psf_ssc_data->suborder = 0x88;
++	psf_ssc_data->reserved[0] = 0x88;
++
++	ccw = cqr->cpaddr;
++	ccw->cmd_code = DASD_ECKD_CCW_PSF;
++	ccw->cda = (__u32)(addr_t)psf_ssc_data;
++	ccw->count = 66;
++
++	cqr->startdev = device;
++	cqr->memdev = device;
++	cqr->block = NULL;
++	cqr->expires = 10*HZ;
++	cqr->buildclk = get_clock();
++	cqr->status = DASD_CQR_FILLED;
++	return cqr;
+ }
+ 
+ /*
+@@ -629,28 +815,28 @@ dasd_eckd_build_psf_ssc(struct dasd_device *device)
+ static int
+ dasd_eckd_psf_ssc(struct dasd_device *device)
+ {
+-       struct dasd_ccw_req *cqr;
+-       int rc;
+-
+-       cqr = dasd_eckd_build_psf_ssc(device);
+-       if (IS_ERR(cqr))
+-	       return PTR_ERR(cqr);
+-
+-       rc = dasd_sleep_on(cqr);
+-       if (!rc)
+-	       /* trigger CIO to reprobe devices */
+-	       css_schedule_reprobe();
+-       dasd_sfree_request(cqr, cqr->device);
+-       return rc;
++	struct dasd_ccw_req *cqr;
++	int rc;
++
++	cqr = dasd_eckd_build_psf_ssc(device);
++	if (IS_ERR(cqr))
++		return PTR_ERR(cqr);
++
++	rc = dasd_sleep_on(cqr);
++	if (!rc)
++		/* trigger CIO to reprobe devices */
++		css_schedule_reprobe();
++	dasd_sfree_request(cqr, cqr->memdev);
++	return rc;
+ }
+ 
+ /*
+  * Valide storage server of current device.
+  */
+-static int
+-dasd_eckd_validate_server(struct dasd_device *device, struct dasd_uid *uid)
++static int dasd_eckd_validate_server(struct dasd_device *device)
+ {
+ 	int rc;
++	struct dasd_eckd_private *private;
+ 
+ 	/* Currently PAV is the only reason to 'validate' server on LPAR */
+ 	if (dasd_nopav || MACHINE_IS_VM)
+@@ -659,9 +845,11 @@ dasd_eckd_validate_server(struct dasd_device *device, struct dasd_uid *uid)
+ 	rc = dasd_eckd_psf_ssc(device);
+ 	/* may be requested feature is not available on server,
+ 	 * therefore just report error and go ahead */
++	private = (struct dasd_eckd_private *) device->private;
+ 	DEV_MESSAGE(KERN_INFO, device,
+ 		    "PSF-SSC on storage subsystem %s.%s.%04x returned rc=%d",
+-		    uid->vendor, uid->serial, uid->ssid, rc);
++		    private->uid.vendor, private->uid.serial,
++		    private->uid.ssid, rc);
+ 	/* RE-Read Configuration Data */
+ 	return dasd_eckd_read_conf(device);
+ }
+@@ -674,9 +862,9 @@ static int
+ dasd_eckd_check_characteristics(struct dasd_device *device)
+ {
+ 	struct dasd_eckd_private *private;
+-	struct dasd_uid uid;
++	struct dasd_block *block;
+ 	void *rdc_data;
+-	int rc;
++	int is_known, rc;
+ 
+ 	private = (struct dasd_eckd_private *) device->private;
+ 	if (private == NULL) {
+@@ -699,27 +887,54 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
+ 	/* Read Configuration Data */
+ 	rc = dasd_eckd_read_conf(device);
+ 	if (rc)
+-		return rc;
++		goto out_err1;
+ 
+ 	/* Generate device unique id and register in devmap */
+-	rc = dasd_eckd_generate_uid(device, &uid);
++	rc = dasd_eckd_generate_uid(device, &private->uid);
+ 	if (rc)
+-		return rc;
+-	rc = dasd_set_uid(device->cdev, &uid);
+-	if (rc == 1)	/* new server found */
+-		rc = dasd_eckd_validate_server(device, &uid);
++		goto out_err1;
++	dasd_set_uid(device->cdev, &private->uid);
++
++	if (private->uid.type == UA_BASE_DEVICE) {
++		block = dasd_alloc_block();
++		if (IS_ERR(block)) {
++			DEV_MESSAGE(KERN_WARNING, device, "%s",
++				    "could not allocate dasd block structure");
++			rc = PTR_ERR(block);
++			goto out_err1;
++		}
++		device->block = block;
++		block->base = device;
++	}
++
++	/* register lcu with alias handling, enable PAV if this is a new lcu */
++	is_known = dasd_alias_make_device_known_to_lcu(device);
++	if (is_known < 0) {
++		rc = is_known;
++		goto out_err2;
++	}
++	if (!is_known) {
++		/* new lcu found */
++		rc = dasd_eckd_validate_server(device); /* will switch pav on */
++		if (rc)
++			goto out_err3;
++	}
++
++	/* Read Feature Codes */
++	rc = dasd_eckd_read_features(device);
+ 	if (rc)
+-		return rc;
++		goto out_err3;
+ 
+ 	/* Read Device Characteristics */
+ 	rdc_data = (void *) &(private->rdc_data);
+ 	memset(rdc_data, 0, sizeof(rdc_data));
+ 	rc = dasd_generic_read_dev_chars(device, "ECKD", &rdc_data, 64);
+-	if (rc)
++	if (rc) {
+ 		DEV_MESSAGE(KERN_WARNING, device,
+ 			    "Read device characteristics returned "
+ 			    "rc=%d", rc);
+-
++		goto out_err3;
++	}
+ 	DEV_MESSAGE(KERN_INFO, device,
+ 		    "%04X/%02X(CU:%04X/%02X) Cyl:%d Head:%d Sec:%d",
+ 		    private->rdc_data.dev_type,
+@@ -729,9 +944,24 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
+ 		    private->rdc_data.no_cyl,
+ 		    private->rdc_data.trk_per_cyl,
+ 		    private->rdc_data.sec_per_trk);
++	return 0;
++
++out_err3:
++	dasd_alias_disconnect_device_from_lcu(device);
++out_err2:
++	dasd_free_block(device->block);
++	device->block = NULL;
++out_err1:
++	kfree(device->private);
++	device->private = NULL;
+ 	return rc;
+ }
+ 
++static void dasd_eckd_uncheck_device(struct dasd_device *device)
++{
++	dasd_alias_disconnect_device_from_lcu(device);
++}
++
+ static struct dasd_ccw_req *
+ dasd_eckd_analysis_ccw(struct dasd_device *device)
+ {
+@@ -755,7 +985,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
+ 	/* Define extent for the first 3 tracks. */
+ 	define_extent(ccw++, cqr->data, 0, 2,
+ 		      DASD_ECKD_CCW_READ_COUNT, device);
+-	LO_data = cqr->data + sizeof (struct DE_eckd_data);
++	LO_data = cqr->data + sizeof(struct DE_eckd_data);
+ 	/* Locate record for the first 4 records on track 0. */
+ 	ccw[-1].flags |= CCW_FLAG_CC;
+ 	locate_record(ccw++, LO_data++, 0, 0, 4,
+@@ -783,7 +1013,9 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
+ 	ccw->count = 8;
+ 	ccw->cda = (__u32)(addr_t) count_data;
+ 
+-	cqr->device = device;
++	cqr->block = NULL;
++	cqr->startdev = device;
++	cqr->memdev = device;
+ 	cqr->retries = 0;
+ 	cqr->buildclk = get_clock();
+ 	cqr->status = DASD_CQR_FILLED;
+@@ -803,7 +1035,7 @@ dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, void *data)
+ 	struct dasd_eckd_private *private;
+ 	struct dasd_device *device;
+ 
+-	device = init_cqr->device;
++	device = init_cqr->startdev;
+ 	private = (struct dasd_eckd_private *) device->private;
+ 	private->init_cqr_status = init_cqr->status;
+ 	dasd_sfree_request(init_cqr, device);
+@@ -811,13 +1043,13 @@ dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, void *data)
+ }
+ 
+ static int
+-dasd_eckd_start_analysis(struct dasd_device *device)
++dasd_eckd_start_analysis(struct dasd_block *block)
+ {
+ 	struct dasd_eckd_private *private;
+ 	struct dasd_ccw_req *init_cqr;
+ 
+-	private = (struct dasd_eckd_private *) device->private;
+-	init_cqr = dasd_eckd_analysis_ccw(device);
++	private = (struct dasd_eckd_private *) block->base->private;
++	init_cqr = dasd_eckd_analysis_ccw(block->base);
+ 	if (IS_ERR(init_cqr))
+ 		return PTR_ERR(init_cqr);
+ 	init_cqr->callback = dasd_eckd_analysis_callback;
+@@ -828,13 +1060,15 @@ dasd_eckd_start_analysis(struct dasd_device *device)
+ }
+ 
+ static int
+-dasd_eckd_end_analysis(struct dasd_device *device)
++dasd_eckd_end_analysis(struct dasd_block *block)
+ {
++	struct dasd_device *device;
+ 	struct dasd_eckd_private *private;
+ 	struct eckd_count *count_area;
+ 	unsigned int sb, blk_per_trk;
+ 	int status, i;
+ 
++	device = block->base;
+ 	private = (struct dasd_eckd_private *) device->private;
+ 	status = private->init_cqr_status;
+ 	private->init_cqr_status = -1;
+@@ -846,7 +1080,7 @@ dasd_eckd_end_analysis(struct dasd_device *device)
+ 
+ 	private->uses_cdl = 1;
+ 	/* Calculate number of blocks/records per track. */
+-	blk_per_trk = recs_per_track(&private->rdc_data, 0, device->bp_block);
++	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
+ 	/* Check Track 0 for Compatible Disk Layout */
+ 	count_area = NULL;
+ 	for (i = 0; i < 3; i++) {
+@@ -876,56 +1110,65 @@ dasd_eckd_end_analysis(struct dasd_device *device)
+ 	if (count_area != NULL && count_area->kl == 0) {
+ 		/* we found notthing violating our disk layout */
+ 		if (dasd_check_blocksize(count_area->dl) == 0)
+-			device->bp_block = count_area->dl;
++			block->bp_block = count_area->dl;
+ 	}
+-	if (device->bp_block == 0) {
++	if (block->bp_block == 0) {
+ 		DEV_MESSAGE(KERN_WARNING, device, "%s",
+ 			    "Volume has incompatible disk layout");
+ 		return -EMEDIUMTYPE;
+ 	}
+-	device->s2b_shift = 0;	/* bits to shift 512 to get a block */
+-	for (sb = 512; sb < device->bp_block; sb = sb << 1)
+-		device->s2b_shift++;
++	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
++	for (sb = 512; sb < block->bp_block; sb = sb << 1)
++		block->s2b_shift++;
+ 
+-	blk_per_trk = recs_per_track(&private->rdc_data, 0, device->bp_block);
+-	device->blocks = (private->rdc_data.no_cyl *
++	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
++	block->blocks = (private->rdc_data.no_cyl *
+ 			  private->rdc_data.trk_per_cyl *
+ 			  blk_per_trk);
+ 
+ 	DEV_MESSAGE(KERN_INFO, device,
+ 		    "(%dkB blks): %dkB at %dkB/trk %s",
+-		    (device->bp_block >> 10),
++		    (block->bp_block >> 10),
+ 		    ((private->rdc_data.no_cyl *
+ 		      private->rdc_data.trk_per_cyl *
+-		      blk_per_trk * (device->bp_block >> 9)) >> 1),
+-		    ((blk_per_trk * device->bp_block) >> 10),
++		      blk_per_trk * (block->bp_block >> 9)) >> 1),
++		    ((blk_per_trk * block->bp_block) >> 10),
+ 		    private->uses_cdl ?
+ 		    "compatible disk layout" : "linux disk layout");
+ 
+ 	return 0;
+ }
+ 
+-static int
+-dasd_eckd_do_analysis(struct dasd_device *device)
++static int dasd_eckd_do_analysis(struct dasd_block *block)
+ {
+ 	struct dasd_eckd_private *private;
+ 
+-	private = (struct dasd_eckd_private *) device->private;
++	private = (struct dasd_eckd_private *) block->base->private;
+ 	if (private->init_cqr_status < 0)
+-		return dasd_eckd_start_analysis(device);
++		return dasd_eckd_start_analysis(block);
+ 	else
+-		return dasd_eckd_end_analysis(device);
++		return dasd_eckd_end_analysis(block);
+ }
+ 
++static int dasd_eckd_ready_to_online(struct dasd_device *device)
++{
++	return dasd_alias_add_device(device);
++};
++
++static int dasd_eckd_online_to_ready(struct dasd_device *device)
++{
++	return dasd_alias_remove_device(device);
++};
++
+ static int
+-dasd_eckd_fill_geometry(struct dasd_device *device, struct hd_geometry *geo)
++dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
+ {
+ 	struct dasd_eckd_private *private;
+ 
+-	private = (struct dasd_eckd_private *) device->private;
+-	if (dasd_check_blocksize(device->bp_block) == 0) {
++	private = (struct dasd_eckd_private *) block->base->private;
++	if (dasd_check_blocksize(block->bp_block) == 0) {
+ 		geo->sectors = recs_per_track(&private->rdc_data,
+-					      0, device->bp_block);
++					      0, block->bp_block);
+ 	}
+ 	geo->cylinders = private->rdc_data.no_cyl;
+ 	geo->heads = private->rdc_data.trk_per_cyl;
+@@ -1037,7 +1280,7 @@ dasd_eckd_format_device(struct dasd_device * device,
+ 		locate_record(ccw++, (struct LO_eckd_data *) data,
+ 			      fdata->start_unit, 0, rpt + 1,
+ 			      DASD_ECKD_CCW_WRITE_RECORD_ZERO, device,
+-			      device->bp_block);
++			      device->block->bp_block);
+ 		data += sizeof(struct LO_eckd_data);
+ 		break;
+ 	case 0x04: /* Invalidate track. */
+@@ -1110,43 +1353,28 @@ dasd_eckd_format_device(struct dasd_device * device,
+ 			ccw++;
+ 		}
+ 	}
+-	fcp->device = device;
+-	fcp->retries = 2;	/* set retry counter to enable ERP */
++	fcp->startdev = device;
++	fcp->memdev = device;
++	clear_bit(DASD_CQR_FLAGS_USE_ERP, &fcp->flags);
++	fcp->retries = 5;	/* set retry counter to enable default ERP */
+ 	fcp->buildclk = get_clock();
+ 	fcp->status = DASD_CQR_FILLED;
+ 	return fcp;
+ }
+ 
+-static dasd_era_t
+-dasd_eckd_examine_error(struct dasd_ccw_req * cqr, struct irb * irb)
++static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
+ {
+-	struct dasd_device *device = (struct dasd_device *) cqr->device;
+-	struct ccw_device *cdev = device->cdev;
+-
+-	if (irb->scsw.cstat == 0x00 &&
+-	    irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+-		return dasd_era_none;
+-
+-	switch (cdev->id.cu_type) {
+-	case 0x3990:
+-	case 0x2105:
+-	case 0x2107:
+-	case 0x1750:
+-		return dasd_3990_erp_examine(cqr, irb);
+-	case 0x9343:
+-		return dasd_9343_erp_examine(cqr, irb);
+-	case 0x3880:
+-	default:
+-		DEV_MESSAGE(KERN_WARNING, device, "%s",
+-			    "default (unknown CU type) - RECOVERABLE return");
+-		return dasd_era_recover;
++	cqr->status = DASD_CQR_FILLED;
++	if (cqr->block && (cqr->startdev != cqr->block->base)) {
++		dasd_eckd_reset_ccw_to_base_io(cqr);
++		cqr->startdev = cqr->block->base;
+ 	}
+-}
++};
+ 
+ static dasd_erp_fn_t
+ dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
+ {
+-	struct dasd_device *device = (struct dasd_device *) cqr->device;
++	struct dasd_device *device = (struct dasd_device *) cqr->startdev;
+ 	struct ccw_device *cdev = device->cdev;
+ 
+ 	switch (cdev->id.cu_type) {
+@@ -1168,8 +1396,37 @@ dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
+ 	return dasd_default_erp_postaction;
+ }
+ 
+-static struct dasd_ccw_req *
+-dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
++
++static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
++						   struct irb *irb)
++{
++	char mask;
++
++	/* first of all check for state change pending interrupt */
++	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
++	if ((irb->scsw.dstat & mask) == mask) {
++		dasd_generic_handle_state_change(device);
++		return;
++	}
++
++	/* summary unit check */
++	if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && irb->ecw[7] == 0x0D) {
++		dasd_alias_handle_summary_unit_check(device, irb);
++		return;
++	}
++
++	/* just report other unsolicited interrupts */
++	DEV_MESSAGE(KERN_DEBUG, device, "%s",
++		    "unsolicited interrupt received");
++	device->discipline->dump_sense(device, NULL, irb);
++	dasd_schedule_device_bh(device);
++
++	return;
++};
++
++static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
++					       struct dasd_block *block,
++					       struct request *req)
+ {
+ 	struct dasd_eckd_private *private;
+ 	unsigned long *idaws;
+@@ -1185,8 +1442,11 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
+ 	sector_t first_trk, last_trk;
+ 	unsigned int first_offs, last_offs;
+ 	unsigned char cmd, rcmd;
++	int use_prefix;
++	struct dasd_device *basedev;
+ 
+-	private = (struct dasd_eckd_private *) device->private;
++	basedev = block->base;
++	private = (struct dasd_eckd_private *) basedev->private;
+ 	if (rq_data_dir(req) == READ)
+ 		cmd = DASD_ECKD_CCW_READ_MT;
+ 	else if (rq_data_dir(req) == WRITE)
+@@ -1194,13 +1454,13 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
+ 	else
+ 		return ERR_PTR(-EINVAL);
+ 	/* Calculate number of blocks/records per track. */
+-	blksize = device->bp_block;
++	blksize = block->bp_block;
+ 	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
+ 	/* Calculate record id of first and last block. */
+-	first_rec = first_trk = req->sector >> device->s2b_shift;
++	first_rec = first_trk = req->sector >> block->s2b_shift;
+ 	first_offs = sector_div(first_trk, blk_per_trk);
+ 	last_rec = last_trk =
+-		(req->sector + req->nr_sectors - 1) >> device->s2b_shift;
++		(req->sector + req->nr_sectors - 1) >> block->s2b_shift;
+ 	last_offs = sector_div(last_trk, blk_per_trk);
+ 	/* Check struct bio and count the number of blocks for the request. */
+ 	count = 0;
+@@ -1209,20 +1469,33 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
+ 		if (bv->bv_len & (blksize - 1))
+ 			/* Eckd can only do full blocks. */
+ 			return ERR_PTR(-EINVAL);
+-		count += bv->bv_len >> (device->s2b_shift + 9);
++		count += bv->bv_len >> (block->s2b_shift + 9);
+ #if defined(CONFIG_64BIT)
+ 		if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
+-			cidaw += bv->bv_len >> (device->s2b_shift + 9);
++			cidaw += bv->bv_len >> (block->s2b_shift + 9);
+ #endif
+ 	}
+ 	/* Paranoia. */
+ 	if (count != last_rec - first_rec + 1)
+ 		return ERR_PTR(-EINVAL);
+-	/* 1x define extent + 1x locate record + number of blocks */
+-	cplength = 2 + count;
+-	/* 1x define extent + 1x locate record + cidaws*sizeof(long) */
+-	datasize = sizeof(struct DE_eckd_data) + sizeof(struct LO_eckd_data) +
+-		cidaw * sizeof(unsigned long);
++
++	/* use the prefix command if available */
++	use_prefix = private->features.feature[8] & 0x01;
++	if (use_prefix) {
++		/* 1x prefix + number of blocks */
++		cplength = 2 + count;
++		/* 1x prefix + cidaws*sizeof(long) */
++		datasize = sizeof(struct PFX_eckd_data) +
++			sizeof(struct LO_eckd_data) +
++			cidaw * sizeof(unsigned long);
++	} else {
++		/* 1x define extent + 1x locate record + number of blocks */
++		cplength = 2 + count;
++		/* 1x define extent + 1x locate record + cidaws*sizeof(long) */
++		datasize = sizeof(struct DE_eckd_data) +
++			sizeof(struct LO_eckd_data) +
++			cidaw * sizeof(unsigned long);
++	}
+ 	/* Find out the number of additional locate record ccws for cdl. */
+ 	if (private->uses_cdl && first_rec < 2*blk_per_trk) {
+ 		if (last_rec >= 2*blk_per_trk)
+@@ -1232,26 +1505,42 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
+ 	}
+ 	/* Allocate the ccw request. */
+ 	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
+-				   cplength, datasize, device);
++				   cplength, datasize, startdev);
+ 	if (IS_ERR(cqr))
+ 		return cqr;
+ 	ccw = cqr->cpaddr;
+-	/* First ccw is define extent. */
+-	if (define_extent(ccw++, cqr->data, first_trk,
+-			  last_trk, cmd, device) == -EAGAIN) {
+-		/* Clock not in sync and XRC is enabled. Try again later. */
+-		dasd_sfree_request(cqr, device);
+-		return ERR_PTR(-EAGAIN);
++	/* First ccw is define extent or prefix. */
++	if (use_prefix) {
++		if (prefix(ccw++, cqr->data, first_trk,
++			   last_trk, cmd, basedev, startdev) == -EAGAIN) {
++			/* Clock not in sync and XRC is enabled.
++			 * Try again later.
++			 */
++			dasd_sfree_request(cqr, startdev);
++			return ERR_PTR(-EAGAIN);
++		}
++		idaws = (unsigned long *) (cqr->data +
++					   sizeof(struct PFX_eckd_data));
++	} else {
++		if (define_extent(ccw++, cqr->data, first_trk,
++				  last_trk, cmd, startdev) == -EAGAIN) {
++			/* Clock not in sync and XRC is enabled.
++			 * Try again later.
++			 */
++			dasd_sfree_request(cqr, startdev);
++			return ERR_PTR(-EAGAIN);
++		}
++		idaws = (unsigned long *) (cqr->data +
++					   sizeof(struct DE_eckd_data));
+ 	}
+ 	/* Build locate_record+read/write/ccws. */
+-	idaws = (unsigned long *) (cqr->data + sizeof(struct DE_eckd_data));
+ 	LO_data = (struct LO_eckd_data *) (idaws + cidaw);
+ 	recid = first_rec;
+ 	if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
+ 		/* Only standard blocks so there is just one locate record. */
+ 		ccw[-1].flags |= CCW_FLAG_CC;
+ 		locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
+-			      last_rec - recid + 1, cmd, device, blksize);
++			      last_rec - recid + 1, cmd, basedev, blksize);
+ 	}
+ 	rq_for_each_segment(bv, req, iter) {
+ 		dst = page_address(bv->bv_page) + bv->bv_offset;
+@@ -1281,7 +1570,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
+ 				ccw[-1].flags |= CCW_FLAG_CC;
+ 				locate_record(ccw++, LO_data++,
+ 					      trkid, recoffs + 1,
+-					      1, rcmd, device, count);
++					      1, rcmd, basedev, count);
+ 			}
+ 			/* Locate record for standard blocks ? */
+ 			if (private->uses_cdl && recid == 2*blk_per_trk) {
+@@ -1289,7 +1578,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
+ 				locate_record(ccw++, LO_data++,
+ 					      trkid, recoffs + 1,
+ 					      last_rec - recid + 1,
+-					      cmd, device, count);
++					      cmd, basedev, count);
+ 			}
+ 			/* Read/write ccw. */
+ 			ccw[-1].flags |= CCW_FLAG_CC;
+@@ -1310,7 +1599,9 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
+ 	}
+ 	if (req->cmd_flags & REQ_FAILFAST)
+ 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+-	cqr->device = device;
++	cqr->startdev = startdev;
++	cqr->memdev = startdev;
++	cqr->block = block;
+ 	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
+ 	cqr->lpm = private->path_data.ppm;
+ 	cqr->retries = 256;
+@@ -1333,10 +1624,10 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
+ 
+ 	if (!dasd_page_cache)
+ 		goto out;
+-	private = (struct dasd_eckd_private *) cqr->device->private;
+-	blksize = cqr->device->bp_block;
++	private = (struct dasd_eckd_private *) cqr->block->base->private;
++	blksize = cqr->block->bp_block;
+ 	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
+-	recid = req->sector >> cqr->device->s2b_shift;
++	recid = req->sector >> cqr->block->s2b_shift;
+ 	ccw = cqr->cpaddr;
+ 	/* Skip over define extent & locate record. */
+ 	ccw++;
+@@ -1367,10 +1658,71 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
+ 	}
+ out:
+ 	status = cqr->status == DASD_CQR_DONE;
+-	dasd_sfree_request(cqr, cqr->device);
++	dasd_sfree_request(cqr, cqr->memdev);
+ 	return status;
+ }
+ 
++/*
++ * Modify ccw chain in cqr so it can be started on a base device.
++ *
++ * Note that this is not enough to restart the cqr!
++ * Either reset cqr->startdev as well (summary unit check handling)
++ * or restart via separate cqr (as in ERP handling).
++ */
++void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
++{
++	struct ccw1 *ccw;
++	struct PFX_eckd_data *pfxdata;
++
++	ccw = cqr->cpaddr;
++	pfxdata = cqr->data;
++
++	if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
++		pfxdata->validity.verify_base = 0;
++		pfxdata->validity.hyper_pav = 0;
++	}
++}
++
++#define DASD_ECKD_CHANQ_MAX_SIZE 4
++
++static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
++						     struct dasd_block *block,
++						     struct request *req)
++{
++	struct dasd_eckd_private *private;
++	struct dasd_device *startdev;
++	unsigned long flags;
++	struct dasd_ccw_req *cqr;
++
++	startdev = dasd_alias_get_start_dev(base);
++	if (!startdev)
++		startdev = base;
++	private = (struct dasd_eckd_private *) startdev->private;
++	if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
++		return ERR_PTR(-EBUSY);
++
++	spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
++	private->count++;
++	cqr = dasd_eckd_build_cp(startdev, block, req);
++	if (IS_ERR(cqr))
++		private->count--;
++	spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
++	return cqr;
++}
++
++static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
++				   struct request *req)
++{
++	struct dasd_eckd_private *private;
++	unsigned long flags;
++
++	spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
++	private = (struct dasd_eckd_private *) cqr->memdev->private;
++	private->count--;
++	spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
++	return dasd_eckd_free_cp(cqr, req);
++}
++
+ static int
+ dasd_eckd_fill_info(struct dasd_device * device,
+ 		    struct dasd_information2_t * info)
+@@ -1384,9 +1736,9 @@ dasd_eckd_fill_info(struct dasd_device * device,
+ 	info->characteristics_size = sizeof(struct dasd_eckd_characteristics);
+ 	memcpy(info->characteristics, &private->rdc_data,
+ 	       sizeof(struct dasd_eckd_characteristics));
+-	info->confdata_size = sizeof (struct dasd_eckd_confdata);
++	info->confdata_size = sizeof(struct dasd_eckd_confdata);
+ 	memcpy(info->configuration_data, &private->conf_data,
+-	       sizeof (struct dasd_eckd_confdata));
++	       sizeof(struct dasd_eckd_confdata));
+ 	return 0;
+ }
+ 
+@@ -1419,7 +1771,8 @@ dasd_eckd_release(struct dasd_device *device)
+         cqr->cpaddr->flags |= CCW_FLAG_SLI;
+         cqr->cpaddr->count = 32;
+ 	cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
+-	cqr->device = device;
++	cqr->startdev = device;
++	cqr->memdev = device;
+ 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+ 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+ 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
+@@ -1429,7 +1782,7 @@ dasd_eckd_release(struct dasd_device *device)
+ 
+ 	rc = dasd_sleep_on_immediatly(cqr);
+ 
+-	dasd_sfree_request(cqr, cqr->device);
++	dasd_sfree_request(cqr, cqr->memdev);
+ 	return rc;
+ }
+ 
+@@ -1459,7 +1812,8 @@ dasd_eckd_reserve(struct dasd_device *device)
+         cqr->cpaddr->flags |= CCW_FLAG_SLI;
+         cqr->cpaddr->count = 32;
+ 	cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
+-	cqr->device = device;
++	cqr->startdev = device;
++	cqr->memdev = device;
+ 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+ 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+ 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
+@@ -1469,7 +1823,7 @@ dasd_eckd_reserve(struct dasd_device *device)
+ 
+ 	rc = dasd_sleep_on_immediatly(cqr);
+ 
+-	dasd_sfree_request(cqr, cqr->device);
++	dasd_sfree_request(cqr, cqr->memdev);
+ 	return rc;
+ }
+ 
+@@ -1498,7 +1852,8 @@ dasd_eckd_steal_lock(struct dasd_device *device)
+         cqr->cpaddr->flags |= CCW_FLAG_SLI;
+         cqr->cpaddr->count = 32;
+ 	cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
+-	cqr->device = device;
++	cqr->startdev = device;
++	cqr->memdev = device;
+ 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+ 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+ 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
+@@ -1508,7 +1863,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
+ 
+ 	rc = dasd_sleep_on_immediatly(cqr);
+ 
+-	dasd_sfree_request(cqr, cqr->device);
++	dasd_sfree_request(cqr, cqr->memdev);
+ 	return rc;
+ }
+ 
+@@ -1526,52 +1881,52 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
+ 
+ 	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
+ 				   1 /* PSF */  + 1 /* RSSD */ ,
+-				   (sizeof (struct dasd_psf_prssd_data) +
+-				    sizeof (struct dasd_rssd_perf_stats_t)),
++				   (sizeof(struct dasd_psf_prssd_data) +
++				    sizeof(struct dasd_rssd_perf_stats_t)),
+ 				   device);
+ 	if (IS_ERR(cqr)) {
+ 		DEV_MESSAGE(KERN_WARNING, device, "%s",
+ 			    "Could not allocate initialization request");
+ 		return PTR_ERR(cqr);
+ 	}
+-	cqr->device = device;
++	cqr->startdev = device;
++	cqr->memdev = device;
+ 	cqr->retries = 0;
+ 	cqr->expires = 10 * HZ;
+ 
+ 	/* Prepare for Read Subsystem Data */
+ 	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+-	memset(prssdp, 0, sizeof (struct dasd_psf_prssd_data));
++	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
+ 	prssdp->order = PSF_ORDER_PRSSD;
+-	prssdp->suborder = 0x01;	/* Perfomance Statistics */
++	prssdp->suborder = 0x01;	/* Performance Statistics */
+ 	prssdp->varies[1] = 0x01;	/* Perf Statistics for the Subsystem */
+ 
+ 	ccw = cqr->cpaddr;
+ 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
+-	ccw->count = sizeof (struct dasd_psf_prssd_data);
++	ccw->count = sizeof(struct dasd_psf_prssd_data);
+ 	ccw->flags |= CCW_FLAG_CC;
+ 	ccw->cda = (__u32)(addr_t) prssdp;
+ 
+ 	/* Read Subsystem Data - Performance Statistics */
+ 	stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
+-	memset(stats, 0, sizeof (struct dasd_rssd_perf_stats_t));
++	memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));
+ 
+ 	ccw++;
+ 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+-	ccw->count = sizeof (struct dasd_rssd_perf_stats_t);
++	ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
+ 	ccw->cda = (__u32)(addr_t) stats;
+ 
+ 	cqr->buildclk = get_clock();
+ 	cqr->status = DASD_CQR_FILLED;
+ 	rc = dasd_sleep_on(cqr);
+ 	if (rc == 0) {
+-		/* Prepare for Read Subsystem Data */
+ 		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+ 		stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
+ 		if (copy_to_user(argp, stats,
+ 				 sizeof(struct dasd_rssd_perf_stats_t)))
+ 			rc = -EFAULT;
+ 	}
+-	dasd_sfree_request(cqr, cqr->device);
++	dasd_sfree_request(cqr, cqr->memdev);
+ 	return rc;
+ }
+ 
+@@ -1594,7 +1949,7 @@ dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
+ 
+ 	rc = 0;
+ 	if (copy_to_user(argp, (long *) &attrib,
+-			 sizeof (struct attrib_data_t)))
++			 sizeof(struct attrib_data_t)))
+ 		rc = -EFAULT;
+ 
+ 	return rc;
+@@ -1627,8 +1982,10 @@ dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
+ }
+ 
+ static int
+-dasd_eckd_ioctl(struct dasd_device *device, unsigned int cmd, void __user *argp)
++dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
+ {
++	struct dasd_device *device = block->base;
++
+ 	switch (cmd) {
+ 	case BIODASDGATTR:
+ 		return dasd_eckd_get_attrib(device, argp);
+@@ -1685,9 +2042,8 @@ dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
+  * Print sense data and related channel program.
+  * Parts are printed because printk buffer is only 1024 bytes.
+  */
+-static void
+-dasd_eckd_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
+-		     struct irb *irb)
++static void dasd_eckd_dump_sense(struct dasd_device *device,
++				 struct dasd_ccw_req *req, struct irb *irb)
+ {
+ 	char *page;
+ 	struct ccw1 *first, *last, *fail, *from, *to;
+@@ -1743,37 +2099,40 @@ dasd_eckd_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
+ 	}
+ 	printk("%s", page);
+ 
+-	/* dump the Channel Program (max 140 Bytes per line) */
+-	/* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */
+-	first = req->cpaddr;
+-	for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
+-	to = min(first + 6, last);
+-	len = sprintf(page,  KERN_ERR PRINTK_HEADER
+-		      " Related CP in req: %p\n", req);
+-	dasd_eckd_dump_ccw_range(first, to, page + len);
+-	printk("%s", page);
++	if (req) {
++		/* req == NULL for unsolicited interrupts */
++		/* dump the Channel Program (max 140 Bytes per line) */
++		/* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */
++		first = req->cpaddr;
++		for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
++		to = min(first + 6, last);
++		len = sprintf(page,  KERN_ERR PRINTK_HEADER
++			      " Related CP in req: %p\n", req);
++		dasd_eckd_dump_ccw_range(first, to, page + len);
++		printk("%s", page);
+ 
+-	/* print failing CCW area (maximum 4) */
+-	/* scsw->cda is either valid or zero  */
+-	len = 0;
+-	from = ++to;
+-	fail = (struct ccw1 *)(addr_t) irb->scsw.cpa; /* failing CCW */
+-	if (from <  fail - 2) {
+-		from = fail - 2;     /* there is a gap - print header */
+-		len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
+-	}
+-	to = min(fail + 1, last);
+-	len += dasd_eckd_dump_ccw_range(from, to, page + len);
+-
+-	/* print last CCWs (maximum 2) */
+-	from = max(from, ++to);
+-	if (from < last - 1) {
+-		from = last - 1;     /* there is a gap - print header */
+-		len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
++		/* print failing CCW area (maximum 4) */
++		/* scsw->cda is either valid or zero  */
++		len = 0;
++		from = ++to;
++		fail = (struct ccw1 *)(addr_t) irb->scsw.cpa; /* failing CCW */
++		if (from <  fail - 2) {
++			from = fail - 2;     /* there is a gap - print header */
++			len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
++		}
++		to = min(fail + 1, last);
++		len += dasd_eckd_dump_ccw_range(from, to, page + len);
++
++		/* print last CCWs (maximum 2) */
++		from = max(from, ++to);
++		if (from < last - 1) {
++			from = last - 1;     /* there is a gap - print header */
++			len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
++		}
++		len += dasd_eckd_dump_ccw_range(from, last, page + len);
++		if (len > 0)
++			printk("%s", page);
+ 	}
+-	len += dasd_eckd_dump_ccw_range(from, last, page + len);
+-	if (len > 0)
+-		printk("%s", page);
+ 	free_page((unsigned long) page);
+ }
+ 
+@@ -1796,16 +2155,20 @@ static struct dasd_discipline dasd_eckd_discipline = {
+ 	.ebcname = "ECKD",
+ 	.max_blocks = 240,
+ 	.check_device = dasd_eckd_check_characteristics,
++	.uncheck_device = dasd_eckd_uncheck_device,
+ 	.do_analysis = dasd_eckd_do_analysis,
++	.ready_to_online = dasd_eckd_ready_to_online,
++	.online_to_ready = dasd_eckd_online_to_ready,
+ 	.fill_geometry = dasd_eckd_fill_geometry,
+ 	.start_IO = dasd_start_IO,
+ 	.term_IO = dasd_term_IO,
++	.handle_terminated_request = dasd_eckd_handle_terminated_request,
+ 	.format_device = dasd_eckd_format_device,
+-	.examine_error = dasd_eckd_examine_error,
+ 	.erp_action = dasd_eckd_erp_action,
+ 	.erp_postaction = dasd_eckd_erp_postaction,
+-	.build_cp = dasd_eckd_build_cp,
+-	.free_cp = dasd_eckd_free_cp,
++	.handle_unsolicited_interrupt = dasd_eckd_handle_unsolicited_interrupt,
++	.build_cp = dasd_eckd_build_alias_cp,
++	.free_cp = dasd_eckd_free_alias_cp,
+ 	.dump_sense = dasd_eckd_dump_sense,
+ 	.fill_info = dasd_eckd_fill_info,
+ 	.ioctl = dasd_eckd_ioctl,
+diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
+index 712ff16..fc2509c 100644
+--- a/drivers/s390/block/dasd_eckd.h
++++ b/drivers/s390/block/dasd_eckd.h
+@@ -39,6 +39,8 @@
+ #define DASD_ECKD_CCW_READ_CKD_MT	 0x9e
+ #define DASD_ECKD_CCW_WRITE_CKD_MT	 0x9d
+ #define DASD_ECKD_CCW_RESERVE		 0xB4
++#define DASD_ECKD_CCW_PFX		 0xE7
++#define DASD_ECKD_CCW_RSCK		 0xF9
+ 
+ /*
+  * Perform Subsystem Function / Sub-Orders
+@@ -137,6 +139,25 @@ struct LO_eckd_data {
+ 	__u16 length;
+ } __attribute__ ((packed));
+ 
++/* Prefix data for format 0x00 and 0x01 */
++struct PFX_eckd_data {
++	unsigned char format;
++	struct {
++		unsigned char define_extend:1;
++		unsigned char time_stamp:1;
++		unsigned char verify_base:1;
++		unsigned char hyper_pav:1;
++		unsigned char reserved:4;
++	} __attribute__ ((packed)) validity;
++	__u8 base_address;
++	__u8 aux;
++	__u8 base_lss;
++	__u8 reserved[7];
++	struct DE_eckd_data define_extend;
++	struct LO_eckd_data locate_record;
++	__u8 LO_extended_data[4];
++} __attribute__ ((packed));
++
+ struct dasd_eckd_characteristics {
+ 	__u16 cu_type;
+ 	struct {
+@@ -254,7 +275,9 @@ struct dasd_eckd_confdata {
+ 		} __attribute__ ((packed)) ned;
+ 		struct {
+ 			unsigned char flags;            /* byte  0    */
+-			unsigned char res2[7];          /* byte  1- 7 */
++			unsigned char res1;		/* byte  1    */
++			__u16 format;			/* byte  2-3  */
++			unsigned char res2[4];		/* byte  4-7  */
+ 			unsigned char sua_flags;	/* byte  8    */
+ 			__u8 base_unit_addr;            /* byte  9    */
+ 			unsigned char res3[22];	        /* byte 10-31 */
+@@ -343,6 +366,11 @@ struct dasd_eckd_path {
+ 	__u8 npm;
+ };
+ 
++struct dasd_rssd_features {
++	char feature[256];
++} __attribute__((packed));
++
++
+ /*
+  * Perform Subsystem Function - Prepare for Read Subsystem Data
+  */
+@@ -365,4 +393,99 @@ struct dasd_psf_ssc_data {
+ 	unsigned char reserved[59];
+ } __attribute__((packed));
+ 
++
++/*
++ * some structures and definitions for alias handling
++ */
++struct dasd_unit_address_configuration {
++	struct {
++		char ua_type;
++		char base_ua;
++	} unit[256];
++} __attribute__((packed));
++
++
++#define MAX_DEVICES_PER_LCU 256
++
++/* flags on the LCU  */
++#define NEED_UAC_UPDATE  0x01
++#define UPDATE_PENDING	0x02
++
++enum pavtype {NO_PAV, BASE_PAV, HYPER_PAV};
++
++
++struct alias_root {
++	struct list_head serverlist;
++	spinlock_t lock;
++};
++
++struct alias_server {
++	struct list_head server;
++	struct dasd_uid uid;
++	struct list_head lculist;
++};
++
++struct summary_unit_check_work_data {
++	char reason;
++	struct dasd_device *device;
++	struct work_struct worker;
++};
++
++struct read_uac_work_data {
++	struct dasd_device *device;
++	struct delayed_work dwork;
++};
++
++struct alias_lcu {
++	struct list_head lcu;
++	struct dasd_uid uid;
++	enum pavtype pav;
++	char flags;
++	spinlock_t lock;
++	struct list_head grouplist;
++	struct list_head active_devices;
++	struct list_head inactive_devices;
++	struct dasd_unit_address_configuration *uac;
++	struct summary_unit_check_work_data suc_data;
++	struct read_uac_work_data ruac_data;
++	struct dasd_ccw_req *rsu_cqr;
++};
++
++struct alias_pav_group {
++	struct list_head group;
++	struct dasd_uid uid;
++	struct alias_lcu *lcu;
++	struct list_head baselist;
++	struct list_head aliaslist;
++	struct dasd_device *next;
++};
++
++
++struct dasd_eckd_private {
++	struct dasd_eckd_characteristics rdc_data;
++	struct dasd_eckd_confdata conf_data;
++	struct dasd_eckd_path path_data;
++	struct eckd_count count_area[5];
++	int init_cqr_status;
++	int uses_cdl;
++	struct attrib_data_t attrib;	/* e.g. cache operations */
++	struct dasd_rssd_features features;
++
++	/* alias managemnet */
++	struct dasd_uid uid;
++	struct alias_pav_group *pavgroup;
++	struct alias_lcu *lcu;
++	int count;
++};
++
++
++
++int dasd_alias_make_device_known_to_lcu(struct dasd_device *);
++void dasd_alias_disconnect_device_from_lcu(struct dasd_device *);
++int dasd_alias_add_device(struct dasd_device *);
++int dasd_alias_remove_device(struct dasd_device *);
++struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *);
++void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *);
++void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *);
++
+ #endif				/* DASD_ECKD_H */
+diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
+index 0c081a6..6e53ab6 100644
+--- a/drivers/s390/block/dasd_eer.c
++++ b/drivers/s390/block/dasd_eer.c
+@@ -336,7 +336,7 @@ static void dasd_eer_write_snss_trigger(struct dasd_device *device,
+ 	unsigned long flags;
+ 	struct eerbuffer *eerb;
+ 
+-	snss_rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
++	snss_rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
+ 	if (snss_rc)
+ 		data_size = 0;
+ 	else
+@@ -404,10 +404,11 @@ void dasd_eer_snss(struct dasd_device *device)
+ 		set_bit(DASD_FLAG_EER_SNSS, &device->flags);
+ 		return;
+ 	}
++	/* cdev is already locked, can't use dasd_add_request_head */
+ 	clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
+ 	cqr->status = DASD_CQR_QUEUED;
+-	list_add(&cqr->list, &device->ccw_queue);
+-	dasd_schedule_bh(device);
++	list_add(&cqr->devlist, &device->ccw_queue);
++	dasd_schedule_device_bh(device);
+ }
+ 
+ /*
+@@ -415,7 +416,7 @@ void dasd_eer_snss(struct dasd_device *device)
+  */
+ static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
+ {
+-        struct dasd_device *device = cqr->device;
++	struct dasd_device *device = cqr->startdev;
+ 	unsigned long flags;
+ 
+ 	dasd_eer_write(device, cqr, DASD_EER_STATECHANGE);
+@@ -458,7 +459,7 @@ int dasd_eer_enable(struct dasd_device *device)
+ 	if (!cqr)
+ 		return -ENOMEM;
+ 
+-	cqr->device = device;
++	cqr->startdev = device;
+ 	cqr->retries = 255;
+ 	cqr->expires = 10 * HZ;
+ 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
+index caa5d91..8f10000 100644
+--- a/drivers/s390/block/dasd_erp.c
++++ b/drivers/s390/block/dasd_erp.c
+@@ -46,6 +46,8 @@ dasd_alloc_erp_request(char *magic, int cplength, int datasize,
+ 	if (cqr == NULL)
+ 		return ERR_PTR(-ENOMEM);
+ 	memset(cqr, 0, sizeof(struct dasd_ccw_req));
++	INIT_LIST_HEAD(&cqr->devlist);
++	INIT_LIST_HEAD(&cqr->blocklist);
+ 	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
+ 	cqr->cpaddr = NULL;
+ 	if (cplength > 0) {
+@@ -66,7 +68,7 @@ dasd_alloc_erp_request(char *magic, int cplength, int datasize,
+ }
+ 
+ void
+-dasd_free_erp_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
++dasd_free_erp_request(struct dasd_ccw_req *cqr, struct dasd_device * device)
+ {
+ 	unsigned long flags;
+ 
+@@ -81,11 +83,11 @@ dasd_free_erp_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
+  * dasd_default_erp_action just retries the current cqr
+  */
+ struct dasd_ccw_req *
+-dasd_default_erp_action(struct dasd_ccw_req * cqr)
++dasd_default_erp_action(struct dasd_ccw_req *cqr)
+ {
+ 	struct dasd_device *device;
+ 
+-	device = cqr->device;
++	device = cqr->startdev;
+ 
+         /* just retry - there is nothing to save ... I got no sense data.... */
+         if (cqr->retries > 0) {
+@@ -93,12 +95,12 @@ dasd_default_erp_action(struct dasd_ccw_req * cqr)
+                              "default ERP called (%i retries left)",
+                              cqr->retries);
+ 		cqr->lpm    = LPM_ANYPATH;
+-		cqr->status = DASD_CQR_QUEUED;
++		cqr->status = DASD_CQR_FILLED;
+         } else {
+                 DEV_MESSAGE (KERN_WARNING, device, "%s",
+ 			     "default ERP called (NO retry left)");
+ 		cqr->status = DASD_CQR_FAILED;
+-		cqr->stopclk = get_clock ();
++		cqr->stopclk = get_clock();
+         }
+         return cqr;
+ }				/* end dasd_default_erp_action */
+@@ -117,15 +119,12 @@ dasd_default_erp_action(struct dasd_ccw_req * cqr)
+  * RETURN VALUES
+  *   cqr		pointer to the original CQR
+  */
+-struct dasd_ccw_req *
+-dasd_default_erp_postaction(struct dasd_ccw_req * cqr)
++struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *cqr)
+ {
+-	struct dasd_device *device;
+ 	int success;
+ 
+ 	BUG_ON(cqr->refers == NULL || cqr->function == NULL);
+ 
+-	device = cqr->device;
+ 	success = cqr->status == DASD_CQR_DONE;
+ 
+ 	/* free all ERPs - but NOT the original cqr */
+@@ -133,10 +132,10 @@ dasd_default_erp_postaction(struct dasd_ccw_req * cqr)
+ 		struct dasd_ccw_req *refers;
+ 
+ 		refers = cqr->refers;
+-		/* remove the request from the device queue */
+-		list_del(&cqr->list);
++		/* remove the request from the block queue */
++		list_del(&cqr->blocklist);
+ 		/* free the finished erp request */
+-		dasd_free_erp_request(cqr, device);
++		dasd_free_erp_request(cqr, cqr->memdev);
+ 		cqr = refers;
+ 	}
+ 
+@@ -157,7 +156,7 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
+ {
+ 	struct dasd_device *device;
+ 
+-	device = cqr->device;
++	device = cqr->startdev;
+ 	/* dump sense data */
+ 	if (device->discipline && device->discipline->dump_sense)
+ 		device->discipline->dump_sense(device, cqr, irb);
+diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
+index 1d95822..d13ea05 100644
+--- a/drivers/s390/block/dasd_fba.c
++++ b/drivers/s390/block/dasd_fba.c
+@@ -117,6 +117,7 @@ locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw,
+ static int
+ dasd_fba_check_characteristics(struct dasd_device *device)
+ {
++	struct dasd_block *block;
+ 	struct dasd_fba_private *private;
+ 	struct ccw_device *cdev = device->cdev;
+ 	void *rdc_data;
+@@ -133,6 +134,16 @@ dasd_fba_check_characteristics(struct dasd_device *device)
+ 		}
+ 		device->private = (void *) private;
+ 	}
++	block = dasd_alloc_block();
++	if (IS_ERR(block)) {
++		DEV_MESSAGE(KERN_WARNING, device, "%s",
++			    "could not allocate dasd block structure");
++		kfree(device->private);
++		return PTR_ERR(block);
++	}
++	device->block = block;
++	block->base = device;
++
+ 	/* Read Device Characteristics */
+ 	rdc_data = (void *) &(private->rdc_data);
+ 	rc = dasd_generic_read_dev_chars(device, "FBA ", &rdc_data, 32);
+@@ -155,60 +166,37 @@ dasd_fba_check_characteristics(struct dasd_device *device)
+ 	return 0;
+ }
+ 
+-static int
+-dasd_fba_do_analysis(struct dasd_device *device)
++static int dasd_fba_do_analysis(struct dasd_block *block)
+ {
+ 	struct dasd_fba_private *private;
+ 	int sb, rc;
+ 
+-	private = (struct dasd_fba_private *) device->private;
++	private = (struct dasd_fba_private *) block->base->private;
+ 	rc = dasd_check_blocksize(private->rdc_data.blk_size);
+ 	if (rc) {
+-		DEV_MESSAGE(KERN_INFO, device, "unknown blocksize %d",
++		DEV_MESSAGE(KERN_INFO, block->base, "unknown blocksize %d",
+ 			    private->rdc_data.blk_size);
+ 		return rc;
+ 	}
+-	device->blocks = private->rdc_data.blk_bdsa;
+-	device->bp_block = private->rdc_data.blk_size;
+-	device->s2b_shift = 0;	/* bits to shift 512 to get a block */
++	block->blocks = private->rdc_data.blk_bdsa;
++	block->bp_block = private->rdc_data.blk_size;
++	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
+ 	for (sb = 512; sb < private->rdc_data.blk_size; sb = sb << 1)
+-		device->s2b_shift++;
++		block->s2b_shift++;
+ 	return 0;
+ }
+ 
+-static int
+-dasd_fba_fill_geometry(struct dasd_device *device, struct hd_geometry *geo)
++static int dasd_fba_fill_geometry(struct dasd_block *block,
++				  struct hd_geometry *geo)
+ {
+-	if (dasd_check_blocksize(device->bp_block) != 0)
++	if (dasd_check_blocksize(block->bp_block) != 0)
+ 		return -EINVAL;
+-	geo->cylinders = (device->blocks << device->s2b_shift) >> 10;
++	geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
+ 	geo->heads = 16;
+-	geo->sectors = 128 >> device->s2b_shift;
++	geo->sectors = 128 >> block->s2b_shift;
+ 	return 0;
+ }
+ 
+-static dasd_era_t
+-dasd_fba_examine_error(struct dasd_ccw_req * cqr, struct irb * irb)
+-{
+-	struct dasd_device *device;
+-	struct ccw_device *cdev;
+-
+-	device = (struct dasd_device *) cqr->device;
+-	if (irb->scsw.cstat == 0x00 &&
+-	    irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+-		return dasd_era_none;
+-
+-	cdev = device->cdev;
+-	switch (cdev->id.dev_type) {
+-	case 0x3370:
+-		return dasd_3370_erp_examine(cqr, irb);
+-	case 0x9336:
+-		return dasd_9336_erp_examine(cqr, irb);
+-	default:
+-		return dasd_era_recover;
+-	}
+-}
+-
+ static dasd_erp_fn_t
+ dasd_fba_erp_action(struct dasd_ccw_req * cqr)
+ {
+@@ -221,13 +209,34 @@ dasd_fba_erp_postaction(struct dasd_ccw_req * cqr)
+ 	if (cqr->function == dasd_default_erp_action)
+ 		return dasd_default_erp_postaction;
+ 
+-	DEV_MESSAGE(KERN_WARNING, cqr->device, "unknown ERP action %p",
++	DEV_MESSAGE(KERN_WARNING, cqr->startdev, "unknown ERP action %p",
+ 		    cqr->function);
+ 	return NULL;
+ }
+ 
+-static struct dasd_ccw_req *
+-dasd_fba_build_cp(struct dasd_device * device, struct request *req)
++static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device,
++						   struct irb *irb)
++{
++	char mask;
++
++	/* first of all check for state change pending interrupt */
++	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
++	if ((irb->scsw.dstat & mask) == mask) {
++		dasd_generic_handle_state_change(device);
++		return;
++	}
++
++	/* check for unsolicited interrupts */
++	DEV_MESSAGE(KERN_DEBUG, device, "%s",
++		    "unsolicited interrupt received");
++	device->discipline->dump_sense(device, NULL, irb);
++	dasd_schedule_device_bh(device);
++	return;
++};
++
++static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
++					      struct dasd_block *block,
++					      struct request *req)
+ {
+ 	struct dasd_fba_private *private;
+ 	unsigned long *idaws;
+@@ -242,17 +251,17 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
+ 	unsigned int blksize, off;
+ 	unsigned char cmd;
+ 
+-	private = (struct dasd_fba_private *) device->private;
++	private = (struct dasd_fba_private *) block->base->private;
+ 	if (rq_data_dir(req) == READ) {
+ 		cmd = DASD_FBA_CCW_READ;
+ 	} else if (rq_data_dir(req) == WRITE) {
+ 		cmd = DASD_FBA_CCW_WRITE;
+ 	} else
+ 		return ERR_PTR(-EINVAL);
+-	blksize = device->bp_block;
++	blksize = block->bp_block;
+ 	/* Calculate record id of first and last block. */
+-	first_rec = req->sector >> device->s2b_shift;
+-	last_rec = (req->sector + req->nr_sectors - 1) >> device->s2b_shift;
++	first_rec = req->sector >> block->s2b_shift;
++	last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
+ 	/* Check struct bio and count the number of blocks for the request. */
+ 	count = 0;
+ 	cidaw = 0;
+@@ -260,7 +269,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
+ 		if (bv->bv_len & (blksize - 1))
+ 			/* Fba can only do full blocks. */
+ 			return ERR_PTR(-EINVAL);
+-		count += bv->bv_len >> (device->s2b_shift + 9);
++		count += bv->bv_len >> (block->s2b_shift + 9);
+ #if defined(CONFIG_64BIT)
+ 		if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
+ 			cidaw += bv->bv_len / blksize;
+@@ -284,13 +293,13 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
+ 	}
+ 	/* Allocate the ccw request. */
+ 	cqr = dasd_smalloc_request(dasd_fba_discipline.name,
+-				   cplength, datasize, device);
++				   cplength, datasize, memdev);
+ 	if (IS_ERR(cqr))
+ 		return cqr;
+ 	ccw = cqr->cpaddr;
+ 	/* First ccw is define extent. */
+ 	define_extent(ccw++, cqr->data, rq_data_dir(req),
+-		      device->bp_block, req->sector, req->nr_sectors);
++		      block->bp_block, req->sector, req->nr_sectors);
+ 	/* Build locate_record + read/write ccws. */
+ 	idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
+ 	LO_data = (struct LO_fba_data *) (idaws + cidaw);
+@@ -326,7 +335,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
+ 					ccw[-1].flags |= CCW_FLAG_CC;
+ 			}
+ 			ccw->cmd_code = cmd;
+-			ccw->count = device->bp_block;
++			ccw->count = block->bp_block;
+ 			if (idal_is_needed(dst, blksize)) {
+ 				ccw->cda = (__u32)(addr_t) idaws;
+ 				ccw->flags = CCW_FLAG_IDA;
+@@ -342,7 +351,9 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
+ 	}
+ 	if (req->cmd_flags & REQ_FAILFAST)
+ 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+-	cqr->device = device;
++	cqr->startdev = memdev;
++	cqr->memdev = memdev;
++	cqr->block = block;
+ 	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
+ 	cqr->retries = 32;
+ 	cqr->buildclk = get_clock();
+@@ -363,8 +374,8 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
+ 
+ 	if (!dasd_page_cache)
+ 		goto out;
+-	private = (struct dasd_fba_private *) cqr->device->private;
+-	blksize = cqr->device->bp_block;
++	private = (struct dasd_fba_private *) cqr->block->base->private;
++	blksize = cqr->block->bp_block;
+ 	ccw = cqr->cpaddr;
+ 	/* Skip over define extent & locate record. */
+ 	ccw++;
+@@ -394,10 +405,15 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
+ 	}
+ out:
+ 	status = cqr->status == DASD_CQR_DONE;
+-	dasd_sfree_request(cqr, cqr->device);
++	dasd_sfree_request(cqr, cqr->memdev);
+ 	return status;
+ }
+ 
++static void dasd_fba_handle_terminated_request(struct dasd_ccw_req *cqr)
++{
++	cqr->status = DASD_CQR_FILLED;
++};
++
+ static int
+ dasd_fba_fill_info(struct dasd_device * device,
+ 		   struct dasd_information2_t * info)
+@@ -546,9 +562,10 @@ static struct dasd_discipline dasd_fba_discipline = {
+ 	.fill_geometry = dasd_fba_fill_geometry,
+ 	.start_IO = dasd_start_IO,
+ 	.term_IO = dasd_term_IO,
+-	.examine_error = dasd_fba_examine_error,
++	.handle_terminated_request = dasd_fba_handle_terminated_request,
+ 	.erp_action = dasd_fba_erp_action,
+ 	.erp_postaction = dasd_fba_erp_postaction,
++	.handle_unsolicited_interrupt = dasd_fba_handle_unsolicited_interrupt,
+ 	.build_cp = dasd_fba_build_cp,
+ 	.free_cp = dasd_fba_free_cp,
+ 	.dump_sense = dasd_fba_dump_sense,
+diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
+index 47ba446..aee6565 100644
+--- a/drivers/s390/block/dasd_genhd.c
++++ b/drivers/s390/block/dasd_genhd.c
+@@ -25,14 +25,15 @@
+ /*
+  * Allocate and register gendisk structure for device.
+  */
+-int
+-dasd_gendisk_alloc(struct dasd_device *device)
++int dasd_gendisk_alloc(struct dasd_block *block)
+ {
+ 	struct gendisk *gdp;
++	struct dasd_device *base;
+ 	int len;
+ 
+ 	/* Make sure the minor for this device exists. */
+-	if (device->devindex >= DASD_PER_MAJOR)
++	base = block->base;
++	if (base->devindex >= DASD_PER_MAJOR)
+ 		return -EBUSY;
+ 
+ 	gdp = alloc_disk(1 << DASD_PARTN_BITS);
+@@ -41,9 +42,9 @@ dasd_gendisk_alloc(struct dasd_device *device)
+ 
+ 	/* Initialize gendisk structure. */
+ 	gdp->major = DASD_MAJOR;
+-	gdp->first_minor = device->devindex << DASD_PARTN_BITS;
++	gdp->first_minor = base->devindex << DASD_PARTN_BITS;
+ 	gdp->fops = &dasd_device_operations;
+-	gdp->driverfs_dev = &device->cdev->dev;
++	gdp->driverfs_dev = &base->cdev->dev;
+ 
+ 	/*
+ 	 * Set device name.
+@@ -53,53 +54,51 @@ dasd_gendisk_alloc(struct dasd_device *device)
+ 	 *   dasdaaaa - dasdzzzz : 456976 devices, added up = 475252
+ 	 */
+ 	len = sprintf(gdp->disk_name, "dasd");
+-	if (device->devindex > 25) {
+-	        if (device->devindex > 701) {
+-		        if (device->devindex > 18277)
++	if (base->devindex > 25) {
++		if (base->devindex > 701) {
++			if (base->devindex > 18277)
+ 			        len += sprintf(gdp->disk_name + len, "%c",
+-					       'a'+(((device->devindex-18278)
++					       'a'+(((base->devindex-18278)
+ 						     /17576)%26));
+ 			len += sprintf(gdp->disk_name + len, "%c",
+-				       'a'+(((device->devindex-702)/676)%26));
++				       'a'+(((base->devindex-702)/676)%26));
+ 		}
+ 		len += sprintf(gdp->disk_name + len, "%c",
+-			       'a'+(((device->devindex-26)/26)%26));
++			       'a'+(((base->devindex-26)/26)%26));
+ 	}
+-	len += sprintf(gdp->disk_name + len, "%c", 'a'+(device->devindex%26));
++	len += sprintf(gdp->disk_name + len, "%c", 'a'+(base->devindex%26));
+ 
+-	if (device->features & DASD_FEATURE_READONLY)
++	if (block->base->features & DASD_FEATURE_READONLY)
+ 		set_disk_ro(gdp, 1);
+-	gdp->private_data = device;
+-	gdp->queue = device->request_queue;
+-	device->gdp = gdp;
+-	set_capacity(device->gdp, 0);
+-	add_disk(device->gdp);
++	gdp->private_data = block;
++	gdp->queue = block->request_queue;
++	block->gdp = gdp;
++	set_capacity(block->gdp, 0);
++	add_disk(block->gdp);
+ 	return 0;
+ }
+ 
+ /*
+  * Unregister and free gendisk structure for device.
+  */
+-void
+-dasd_gendisk_free(struct dasd_device *device)
++void dasd_gendisk_free(struct dasd_block *block)
+ {
+-	if (device->gdp) {
+-		del_gendisk(device->gdp);
+-		device->gdp->queue = NULL;
+-		put_disk(device->gdp);
+-		device->gdp = NULL;
++	if (block->gdp) {
++		del_gendisk(block->gdp);
++		block->gdp->queue = NULL;
++		put_disk(block->gdp);
++		block->gdp = NULL;
+ 	}
+ }
+ 
+ /*
+  * Trigger a partition detection.
+  */
+-int
+-dasd_scan_partitions(struct dasd_device * device)
++int dasd_scan_partitions(struct dasd_block *block)
+ {
+ 	struct block_device *bdev;
+ 
+-	bdev = bdget_disk(device->gdp, 0);
++	bdev = bdget_disk(block->gdp, 0);
+ 	if (!bdev || blkdev_get(bdev, FMODE_READ, 1) < 0)
+ 		return -ENODEV;
+ 	/*
+@@ -117,7 +116,7 @@ dasd_scan_partitions(struct dasd_device * device)
+ 	 * is why the assignment to device->bdev is done AFTER
+ 	 * the BLKRRPART ioctl.
+ 	 */
+-	device->bdev = bdev;
++	block->bdev = bdev;
+ 	return 0;
+ }
+ 
+@@ -125,8 +124,7 @@ dasd_scan_partitions(struct dasd_device * device)
+  * Remove all inodes in the system for a device, delete the
+  * partitions and make device unusable by setting its size to zero.
+  */
+-void
+-dasd_destroy_partitions(struct dasd_device * device)
++void dasd_destroy_partitions(struct dasd_block *block)
+ {
+ 	/* The two structs have 168/176 byte on 31/64 bit. */
+ 	struct blkpg_partition bpart;
+@@ -137,8 +135,8 @@ dasd_destroy_partitions(struct dasd_device * device)
+ 	 * Get the bdev pointer from the device structure and clear
+ 	 * device->bdev to lower the offline open_count limit again.
+ 	 */
+-	bdev = device->bdev;
+-	device->bdev = NULL;
++	bdev = block->bdev;
++	block->bdev = NULL;
+ 
+ 	/*
+ 	 * See fs/partition/check.c:delete_partition
+@@ -149,17 +147,16 @@ dasd_destroy_partitions(struct dasd_device * device)
+ 	memset(&barg, 0, sizeof(struct blkpg_ioctl_arg));
+ 	barg.data = (void __force __user *) &bpart;
+ 	barg.op = BLKPG_DEL_PARTITION;
+-	for (bpart.pno = device->gdp->minors - 1; bpart.pno > 0; bpart.pno--)
++	for (bpart.pno = block->gdp->minors - 1; bpart.pno > 0; bpart.pno--)
+ 		ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg);
+ 
+-	invalidate_partition(device->gdp, 0);
++	invalidate_partition(block->gdp, 0);
+ 	/* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
+ 	blkdev_put(bdev);
+-	set_capacity(device->gdp, 0);
++	set_capacity(block->gdp, 0);
+ }
+ 
+-int
+-dasd_gendisk_init(void)
++int dasd_gendisk_init(void)
+ {
+ 	int rc;
+ 
+@@ -174,8 +171,7 @@ dasd_gendisk_init(void)
+ 	return 0;
+ }
+ 
+-void
+-dasd_gendisk_exit(void)
++void dasd_gendisk_exit(void)
+ {
+ 	unregister_blkdev(DASD_MAJOR, "dasd");
+ }
+diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
+index d427dae..44b2984 100644
+--- a/drivers/s390/block/dasd_int.h
++++ b/drivers/s390/block/dasd_int.h
+@@ -64,13 +64,7 @@
+  * SECTION: Type definitions
+  */
+ struct dasd_device;
+-
+-typedef enum {
+-	dasd_era_fatal = -1,	/* no chance to recover		     */
+-	dasd_era_none = 0,	/* don't recover, everything alright */
+-	dasd_era_msg = 1,	/* don't recover, just report...     */
+-	dasd_era_recover = 2	/* recovery action recommended	     */
+-} dasd_era_t;
++struct dasd_block;
+ 
+ /* BIT DEFINITIONS FOR SENSE DATA */
+ #define DASD_SENSE_BIT_0 0x80
+@@ -151,19 +145,22 @@ do { \
+ 
+ struct dasd_ccw_req {
+ 	unsigned int magic;		/* Eye catcher */
+-        struct list_head list;		/* list_head for request queueing. */
++	struct list_head devlist;	/* for dasd_device request queue */
++	struct list_head blocklist;	/* for dasd_block request queue */
+ 
+ 	/* Where to execute what... */
+-	struct dasd_device *device;	/* device the request is for */
++	struct dasd_block *block;	/* the originating block device */
++	struct dasd_device *memdev;	/* the device used to allocate this */
++	struct dasd_device *startdev;	/* device the request is started on */
+ 	struct ccw1 *cpaddr;		/* address of channel program */
+-	char status;	        	/* status of this request */
++	char status;			/* status of this request */
+ 	short retries;			/* A retry counter */
+ 	unsigned long flags;        	/* flags of this request */
+ 
+ 	/* ... and how */
+ 	unsigned long starttime;	/* jiffies time of request start */
+ 	int expires;			/* expiration period in jiffies */
+-	char lpm;               	/* logical path mask */
++	char lpm;			/* logical path mask */
+ 	void *data;			/* pointer to data area */
+ 
+ 	/* these are important for recovering erroneous requests          */
+@@ -178,20 +175,27 @@ struct dasd_ccw_req {
+ 	unsigned long long endclk;	/* TOD-clock of request termination */
+ 
+         /* Callback that is called after reaching final status. */
+-        void (*callback)(struct dasd_ccw_req *, void *data);
+-        void *callback_data;
++	void (*callback)(struct dasd_ccw_req *, void *data);
++	void *callback_data;
+ };
+ 
+ /*
+  * dasd_ccw_req -> status can be:
+  */
+-#define DASD_CQR_FILLED   0x00	/* request is ready to be processed */
+-#define DASD_CQR_QUEUED   0x01	/* request is queued to be processed */
+-#define DASD_CQR_IN_IO    0x02	/* request is currently in IO */
+-#define DASD_CQR_DONE     0x03	/* request is completed successfully */
+-#define DASD_CQR_ERROR    0x04	/* request is completed with error */
+-#define DASD_CQR_FAILED   0x05	/* request is finally failed */
+-#define DASD_CQR_CLEAR    0x06	/* request is clear pending */
++#define DASD_CQR_FILLED 	0x00	/* request is ready to be processed */
++#define DASD_CQR_DONE		0x01	/* request is completed successfully */
++#define DASD_CQR_NEED_ERP	0x02	/* request needs recovery action */
++#define DASD_CQR_IN_ERP 	0x03	/* request is in recovery */
++#define DASD_CQR_FAILED 	0x04	/* request is finally failed */
++#define DASD_CQR_TERMINATED	0x05	/* request was stopped by driver */
++
++#define DASD_CQR_QUEUED 	0x80	/* request is queued to be processed */
++#define DASD_CQR_IN_IO		0x81	/* request is currently in IO */
++#define DASD_CQR_ERROR		0x82	/* request is completed with error */
++#define DASD_CQR_CLEAR_PENDING	0x83	/* request is clear pending */
++#define DASD_CQR_CLEARED	0x84	/* request was cleared */
++#define DASD_CQR_SUCCESS	0x85	/* request was successfull */
++
+ 
+ /* per dasd_ccw_req flags */
+ #define DASD_CQR_FLAGS_USE_ERP   0	/* use ERP for this request */
+@@ -214,52 +218,71 @@ struct dasd_discipline {
+ 
+ 	struct list_head list;	/* used for list of disciplines */
+ 
+-        /*
+-         * Device recognition functions. check_device is used to verify
+-         * the sense data and the information returned by read device
+-         * characteristics. It returns 0 if the discipline can be used
+-         * for the device in question.
+-         * do_analysis is used in the step from device state "basic" to
+-         * state "accept". It returns 0 if the device can be made ready,
+-         * it returns -EMEDIUMTYPE if the device can't be made ready or
+-         * -EAGAIN if do_analysis started a ccw that needs to complete
+-         * before the analysis may be repeated.
+-         */
+-        int (*check_device)(struct dasd_device *);
+-	int (*do_analysis) (struct dasd_device *);
+-
+-        /*
+-         * Device operation functions. build_cp creates a ccw chain for
+-         * a block device request, start_io starts the request and
+-         * term_IO cancels it (e.g. in case of a timeout). format_device
+-         * returns a ccw chain to be used to format the device.
+-         */
++	/*
++	 * Device recognition functions. check_device is used to verify
++	 * the sense data and the information returned by read device
++	 * characteristics. It returns 0 if the discipline can be used
++	 * for the device in question. uncheck_device is called during
++	 * device shutdown to deregister a device from its discipline.
++	 */
++	int (*check_device) (struct dasd_device *);
++	void (*uncheck_device) (struct dasd_device *);
++
++	/*
++	 * do_analysis is used in the step from device state "basic" to
++	 * state "accept". It returns 0 if the device can be made ready,
++	 * it returns -EMEDIUMTYPE if the device can't be made ready or
++	 * -EAGAIN if do_analysis started a ccw that needs to complete
++	 * before the analysis may be repeated.
++	 */
++	int (*do_analysis) (struct dasd_block *);
++
++	/*
++	 * Last things to do when a device is set online, and first things
++	 * when it is set offline.
++	 */
++	int (*ready_to_online) (struct dasd_device *);
++	int (*online_to_ready) (struct dasd_device *);
++
++	/*
++	 * Device operation functions. build_cp creates a ccw chain for
++	 * a block device request, start_io starts the request and
++	 * term_IO cancels it (e.g. in case of a timeout). format_device
++	 * returns a ccw chain to be used to format the device.
++	 * handle_terminated_request allows to examine a cqr and prepare
++	 * it for retry.
++	 */
+ 	struct dasd_ccw_req *(*build_cp) (struct dasd_device *,
++					  struct dasd_block *,
+ 					  struct request *);
+ 	int (*start_IO) (struct dasd_ccw_req *);
+ 	int (*term_IO) (struct dasd_ccw_req *);
++	void (*handle_terminated_request) (struct dasd_ccw_req *);
+ 	struct dasd_ccw_req *(*format_device) (struct dasd_device *,
+ 					       struct format_data_t *);
+ 	int (*free_cp) (struct dasd_ccw_req *, struct request *);
+-        /*
+-         * Error recovery functions. examine_error() returns a value that
+-         * indicates what to do for an error condition. If examine_error()
++
++	/*
++	 * Error recovery functions. examine_error() returns a value that
++	 * indicates what to do for an error condition. If examine_error()
+ 	 * returns 'dasd_era_recover' erp_action() is called to create a
+-         * special error recovery ccw. erp_postaction() is called after
+-         * an error recovery ccw has finished its execution. dump_sense
+-         * is called for every error condition to print the sense data
+-         * to the console.
+-         */
+-	dasd_era_t(*examine_error) (struct dasd_ccw_req *, struct irb *);
++	 * special error recovery ccw. erp_postaction() is called after
++	 * an error recovery ccw has finished its execution. dump_sense
++	 * is called for every error condition to print the sense data
++	 * to the console.
++	 */
+ 	dasd_erp_fn_t(*erp_action) (struct dasd_ccw_req *);
+ 	dasd_erp_fn_t(*erp_postaction) (struct dasd_ccw_req *);
+ 	void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *,
+ 			    struct irb *);
+ 
++	void (*handle_unsolicited_interrupt) (struct dasd_device *,
++					      struct irb *);
++
+         /* i/o control functions. */
+-	int (*fill_geometry) (struct dasd_device *, struct hd_geometry *);
++	int (*fill_geometry) (struct dasd_block *, struct hd_geometry *);
+ 	int (*fill_info) (struct dasd_device *, struct dasd_information2_t *);
+-	int (*ioctl) (struct dasd_device *, unsigned int, void __user *);
++	int (*ioctl) (struct dasd_block *, unsigned int, void __user *);
+ };
+ 
+ extern struct dasd_discipline *dasd_diag_discipline_pointer;
+@@ -267,12 +290,18 @@ extern struct dasd_discipline *dasd_diag_discipline_pointer;
+ /*
+  * Unique identifier for dasd device.
+  */
++#define UA_NOT_CONFIGURED  0x00
++#define UA_BASE_DEVICE	   0x01
++#define UA_BASE_PAV_ALIAS  0x02
++#define UA_HYPER_PAV_ALIAS 0x03
++
+ struct dasd_uid {
+-	__u8 alias;
++	__u8 type;
+ 	char vendor[4];
+ 	char serial[15];
+ 	__u16 ssid;
+-	__u8 unit_addr;
++	__u8 real_unit_addr;
++	__u8 base_unit_addr;
+ };
+ 
+ /*
+@@ -293,14 +322,9 @@ struct dasd_uid {
+ 
+ struct dasd_device {
+ 	/* Block device stuff. */
+-	struct gendisk *gdp;
+-	struct request_queue *request_queue;
+-	spinlock_t request_queue_lock;
+-	struct block_device *bdev;
++	struct dasd_block *block;
++
+         unsigned int devindex;
+-	unsigned long blocks;	   /* size of volume in blocks */
+-	unsigned int bp_block;	   /* bytes per block */
+-	unsigned int s2b_shift;	   /* log2 (bp_block/512) */
+ 	unsigned long flags;	   /* per device flags */
+ 	unsigned short features;   /* copy of devmap-features (read-only!) */
+ 
+@@ -316,9 +340,8 @@ struct dasd_device {
+ 	int state, target;
+ 	int stopped;		/* device (ccw_device_start) was stopped */
+ 
+-	/* Open and reference count. */
++	/* reference count. */
+         atomic_t ref_count;
+-	atomic_t open_count;
+ 
+ 	/* ccw queue and memory for static ccw/erp buffers. */
+ 	struct list_head ccw_queue;
+@@ -337,20 +360,45 @@ struct dasd_device {
+ 
+ 	struct ccw_device *cdev;
+ 
++	/* hook for alias management */
++	struct list_head alias_list;
++};
++
++struct dasd_block {
++	/* Block device stuff. */
++	struct gendisk *gdp;
++	struct request_queue *request_queue;
++	spinlock_t request_queue_lock;
++	struct block_device *bdev;
++	atomic_t open_count;
++
++	unsigned long blocks;	   /* size of volume in blocks */
++	unsigned int bp_block;	   /* bytes per block */
++	unsigned int s2b_shift;	   /* log2 (bp_block/512) */
++
++	struct dasd_device *base;
++	struct list_head ccw_queue;
++	spinlock_t queue_lock;
++
++	atomic_t tasklet_scheduled;
++	struct tasklet_struct tasklet;
++	struct timer_list timer;
++
+ #ifdef CONFIG_DASD_PROFILE
+ 	struct dasd_profile_info_t profile;
+ #endif
+ };
+ 
++
++
+ /* reasons why device (ccw_device_start) was stopped */
+ #define DASD_STOPPED_NOT_ACC 1         /* not accessible */
+ #define DASD_STOPPED_QUIESCE 2         /* Quiesced */
+ #define DASD_STOPPED_PENDING 4         /* long busy */
+ #define DASD_STOPPED_DC_WAIT 8         /* disconnected, wait */
+-#define DASD_STOPPED_DC_EIO  16        /* disconnected, return -EIO */
++#define DASD_STOPPED_SU      16        /* summary unit check handling */
+ 
+ /* per device flags */
+-#define DASD_FLAG_DSC_ERROR	2	/* return -EIO when disconnected */
+ #define DASD_FLAG_OFFLINE	3	/* device is in offline processing */
+ #define DASD_FLAG_EER_SNSS	4	/* A SNSS is required */
+ #define DASD_FLAG_EER_IN_USE	5	/* A SNSS request is running */
+@@ -489,6 +537,9 @@ dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device)
+ struct dasd_device *dasd_alloc_device(void);
+ void dasd_free_device(struct dasd_device *);
+ 
++struct dasd_block *dasd_alloc_block(void);
++void dasd_free_block(struct dasd_block *);
++
+ void dasd_enable_device(struct dasd_device *);
+ void dasd_set_target_state(struct dasd_device *, int);
+ void dasd_kick_device(struct dasd_device *);
+@@ -497,18 +548,23 @@ void dasd_add_request_head(struct dasd_ccw_req *);
+ void dasd_add_request_tail(struct dasd_ccw_req *);
+ int  dasd_start_IO(struct dasd_ccw_req *);
+ int  dasd_term_IO(struct dasd_ccw_req *);
+-void dasd_schedule_bh(struct dasd_device *);
++void dasd_schedule_device_bh(struct dasd_device *);
++void dasd_schedule_block_bh(struct dasd_block *);
+ int  dasd_sleep_on(struct dasd_ccw_req *);
+ int  dasd_sleep_on_immediatly(struct dasd_ccw_req *);
+ int  dasd_sleep_on_interruptible(struct dasd_ccw_req *);
+-void dasd_set_timer(struct dasd_device *, int);
+-void dasd_clear_timer(struct dasd_device *);
++void dasd_device_set_timer(struct dasd_device *, int);
++void dasd_device_clear_timer(struct dasd_device *);
++void dasd_block_set_timer(struct dasd_block *, int);
++void dasd_block_clear_timer(struct dasd_block *);
+ int  dasd_cancel_req(struct dasd_ccw_req *);
++int dasd_flush_device_queue(struct dasd_device *);
+ int dasd_generic_probe (struct ccw_device *, struct dasd_discipline *);
+ void dasd_generic_remove (struct ccw_device *cdev);
+ int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
+ int dasd_generic_set_offline (struct ccw_device *cdev);
+ int dasd_generic_notify(struct ccw_device *, int);
++void dasd_generic_handle_state_change(struct dasd_device *);
+ 
+ int dasd_generic_read_dev_chars(struct dasd_device *, char *, void **, int);
+ 
+@@ -542,10 +598,10 @@ int dasd_busid_known(char *);
+ /* externals in dasd_gendisk.c */
+ int  dasd_gendisk_init(void);
+ void dasd_gendisk_exit(void);
+-int dasd_gendisk_alloc(struct dasd_device *);
+-void dasd_gendisk_free(struct dasd_device *);
+-int dasd_scan_partitions(struct dasd_device *);
+-void dasd_destroy_partitions(struct dasd_device *);
++int dasd_gendisk_alloc(struct dasd_block *);
++void dasd_gendisk_free(struct dasd_block *);
++int dasd_scan_partitions(struct dasd_block *);
++void dasd_destroy_partitions(struct dasd_block *);
+ 
+ /* externals in dasd_ioctl.c */
+ int  dasd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
+@@ -563,20 +619,9 @@ struct dasd_ccw_req *dasd_alloc_erp_request(char *, int, int,
+ void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *);
+ void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
+ 
+-/* externals in dasd_3370_erp.c */
+-dasd_era_t dasd_3370_erp_examine(struct dasd_ccw_req *, struct irb *);
+-
+ /* externals in dasd_3990_erp.c */
+-dasd_era_t dasd_3990_erp_examine(struct dasd_ccw_req *, struct irb *);
+ struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *);
+ 
+-/* externals in dasd_9336_erp.c */
+-dasd_era_t dasd_9336_erp_examine(struct dasd_ccw_req *, struct irb *);
+-
+-/* externals in dasd_9336_erp.c */
+-dasd_era_t dasd_9343_erp_examine(struct dasd_ccw_req *, struct irb *);
+-struct dasd_ccw_req *dasd_9343_erp_action(struct dasd_ccw_req *);
+-
+ /* externals in dasd_eer.c */
+ #ifdef CONFIG_DASD_EER
+ int dasd_eer_init(void);
+diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
+index 672eb0a..91a6463 100644
+--- a/drivers/s390/block/dasd_ioctl.c
++++ b/drivers/s390/block/dasd_ioctl.c
+@@ -38,15 +38,15 @@ dasd_ioctl_api_version(void __user *argp)
+ static int
+ dasd_ioctl_enable(struct block_device *bdev)
+ {
+-	struct dasd_device *device = bdev->bd_disk->private_data;
++	struct dasd_block *block = bdev->bd_disk->private_data;
+ 
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EACCES;
+ 
+-	dasd_enable_device(device);
++	dasd_enable_device(block->base);
+ 	/* Formatting the dasd device can change the capacity. */
+ 	mutex_lock(&bdev->bd_mutex);
+-	i_size_write(bdev->bd_inode, (loff_t)get_capacity(device->gdp) << 9);
++	i_size_write(bdev->bd_inode, (loff_t)get_capacity(block->gdp) << 9);
+ 	mutex_unlock(&bdev->bd_mutex);
+ 	return 0;
+ }
+@@ -58,7 +58,7 @@ dasd_ioctl_enable(struct block_device *bdev)
+ static int
+ dasd_ioctl_disable(struct block_device *bdev)
+ {
+-	struct dasd_device *device = bdev->bd_disk->private_data;
++	struct dasd_block *block = bdev->bd_disk->private_data;
+ 
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EACCES;
+@@ -71,7 +71,7 @@ dasd_ioctl_disable(struct block_device *bdev)
+ 	 * using the BIODASDFMT ioctl. Therefore the correct state for the
+ 	 * device is DASD_STATE_BASIC that allows to do basic i/o.
+ 	 */
+-	dasd_set_target_state(device, DASD_STATE_BASIC);
++	dasd_set_target_state(block->base, DASD_STATE_BASIC);
+ 	/*
+ 	 * Set i_size to zero, since read, write, etc. check against this
+ 	 * value.
+@@ -85,19 +85,19 @@ dasd_ioctl_disable(struct block_device *bdev)
+ /*
+  * Quiesce device.
+  */
+-static int
+-dasd_ioctl_quiesce(struct dasd_device *device)
++static int dasd_ioctl_quiesce(struct dasd_block *block)
+ {
+ 	unsigned long flags;
++	struct dasd_device *base;
+ 
++	base = block->base;
+ 	if (!capable (CAP_SYS_ADMIN))
+ 		return -EACCES;
+ 
+-	DEV_MESSAGE (KERN_DEBUG, device, "%s",
+-		     "Quiesce IO on device");
+-	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+-	device->stopped |= DASD_STOPPED_QUIESCE;
+-	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
++	DEV_MESSAGE(KERN_DEBUG, base, "%s", "Quiesce IO on device");
++	spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
++	base->stopped |= DASD_STOPPED_QUIESCE;
++	spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
+ 	return 0;
+ }
+ 
+@@ -105,22 +105,21 @@ dasd_ioctl_quiesce(struct dasd_device *device)
+ /*
+  * Quiesce device.
+  */
+-static int
+-dasd_ioctl_resume(struct dasd_device *device)
++static int dasd_ioctl_resume(struct dasd_block *block)
+ {
+ 	unsigned long flags;
++	struct dasd_device *base;
+ 
++	base = block->base;
+ 	if (!capable (CAP_SYS_ADMIN))
+ 		return -EACCES;
+ 
+-	DEV_MESSAGE (KERN_DEBUG, device, "%s",
+-		     "resume IO on device");
+-
+-	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+-	device->stopped &= ~DASD_STOPPED_QUIESCE;
+-	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
++	DEV_MESSAGE(KERN_DEBUG, base, "%s", "resume IO on device");
++	spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
++	base->stopped &= ~DASD_STOPPED_QUIESCE;
++	spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
+ 
+-	dasd_schedule_bh (device);
++	dasd_schedule_block_bh(block);
+ 	return 0;
+ }
+ 
+@@ -130,22 +129,23 @@ dasd_ioctl_resume(struct dasd_device *device)
+  * commands to format a single unit of the device. In terms of the ECKD
+  * devices this means CCWs are generated to format a single track.
+  */
+-static int
+-dasd_format(struct dasd_device * device, struct format_data_t * fdata)
++static int dasd_format(struct dasd_block *block, struct format_data_t *fdata)
+ {
+ 	struct dasd_ccw_req *cqr;
++	struct dasd_device *base;
+ 	int rc;
+ 
+-	if (device->discipline->format_device == NULL)
++	base = block->base;
++	if (base->discipline->format_device == NULL)
+ 		return -EPERM;
+ 
+-	if (device->state != DASD_STATE_BASIC) {
+-		DEV_MESSAGE(KERN_WARNING, device, "%s",
++	if (base->state != DASD_STATE_BASIC) {
++		DEV_MESSAGE(KERN_WARNING, base, "%s",
+ 			    "dasd_format: device is not disabled! ");
+ 		return -EBUSY;
+ 	}
+ 
+-	DBF_DEV_EVENT(DBF_NOTICE, device,
++	DBF_DEV_EVENT(DBF_NOTICE, base,
+ 		      "formatting units %d to %d (%d B blocks) flags %d",
+ 		      fdata->start_unit,
+ 		      fdata->stop_unit, fdata->blksize, fdata->intensity);
+@@ -156,20 +156,20 @@ dasd_format(struct dasd_device * device, struct format_data_t * fdata)
+ 	 * enabling the device later.
+ 	 */
+ 	if (fdata->start_unit == 0) {
+-		struct block_device *bdev = bdget_disk(device->gdp, 0);
++		struct block_device *bdev = bdget_disk(block->gdp, 0);
+ 		bdev->bd_inode->i_blkbits = blksize_bits(fdata->blksize);
+ 		bdput(bdev);
+ 	}
+ 
+ 	while (fdata->start_unit <= fdata->stop_unit) {
+-		cqr = device->discipline->format_device(device, fdata);
++		cqr = base->discipline->format_device(base, fdata);
+ 		if (IS_ERR(cqr))
+ 			return PTR_ERR(cqr);
+ 		rc = dasd_sleep_on_interruptible(cqr);
+-		dasd_sfree_request(cqr, cqr->device);
++		dasd_sfree_request(cqr, cqr->memdev);
+ 		if (rc) {
+ 			if (rc != -ERESTARTSYS)
+-				DEV_MESSAGE(KERN_ERR, device,
++				DEV_MESSAGE(KERN_ERR, base,
+ 					    " Formatting of unit %d failed "
+ 					    "with rc = %d",
+ 					    fdata->start_unit, rc);
+@@ -186,7 +186,7 @@ dasd_format(struct dasd_device * device, struct format_data_t * fdata)
+ static int
+ dasd_ioctl_format(struct block_device *bdev, void __user *argp)
+ {
+-	struct dasd_device *device = bdev->bd_disk->private_data;
++	struct dasd_block *block = bdev->bd_disk->private_data;
+ 	struct format_data_t fdata;
+ 
+ 	if (!capable(CAP_SYS_ADMIN))
+@@ -194,51 +194,47 @@ dasd_ioctl_format(struct block_device *bdev, void __user *argp)
+ 	if (!argp)
+ 		return -EINVAL;
+ 
+-	if (device->features & DASD_FEATURE_READONLY)
++	if (block->base->features & DASD_FEATURE_READONLY)
+ 		return -EROFS;
+ 	if (copy_from_user(&fdata, argp, sizeof(struct format_data_t)))
+ 		return -EFAULT;
+ 	if (bdev != bdev->bd_contains) {
+-		DEV_MESSAGE(KERN_WARNING, device, "%s",
++		DEV_MESSAGE(KERN_WARNING, block->base, "%s",
+ 			    "Cannot low-level format a partition");
+ 		return -EINVAL;
+ 	}
+-	return dasd_format(device, &fdata);
++	return dasd_format(block, &fdata);
+ }
+ 
+ #ifdef CONFIG_DASD_PROFILE
+ /*
+  * Reset device profile information
+  */
+-static int
+-dasd_ioctl_reset_profile(struct dasd_device *device)
++static int dasd_ioctl_reset_profile(struct dasd_block *block)
+ {
+-	memset(&device->profile, 0, sizeof (struct dasd_profile_info_t));
++	memset(&block->profile, 0, sizeof(struct dasd_profile_info_t));
+ 	return 0;
+ }
+ 
+ /*
+  * Return device profile information
+  */
+-static int
+-dasd_ioctl_read_profile(struct dasd_device *device, void __user *argp)
++static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
+ {
+ 	if (dasd_profile_level == DASD_PROFILE_OFF)
+ 		return -EIO;
+-	if (copy_to_user(argp, &device->profile,
+-			 sizeof (struct dasd_profile_info_t)))
++	if (copy_to_user(argp, &block->profile,
++			 sizeof(struct dasd_profile_info_t)))
+ 		return -EFAULT;
+ 	return 0;
+ }
+ #else
+-static int
+-dasd_ioctl_reset_profile(struct dasd_device *device)
++static int dasd_ioctl_reset_profile(struct dasd_block *block)
+ {
+ 	return -ENOSYS;
+ }
+ 
+-static int
+-dasd_ioctl_read_profile(struct dasd_device *device, void __user *argp)
++static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
+ {
+ 	return -ENOSYS;
+ }
+@@ -247,87 +243,88 @@ dasd_ioctl_read_profile(struct dasd_device *device, void __user *argp)
+ /*
+  * Return dasd information. Used for BIODASDINFO and BIODASDINFO2.
+  */
+-static int
+-dasd_ioctl_information(struct dasd_device *device,
+-		unsigned int cmd, void __user *argp)
++static int dasd_ioctl_information(struct dasd_block *block,
++				  unsigned int cmd, void __user *argp)
+ {
+ 	struct dasd_information2_t *dasd_info;
+ 	unsigned long flags;
+ 	int rc;
++	struct dasd_device *base;
+ 	struct ccw_device *cdev;
+ 	struct ccw_dev_id dev_id;
+ 
+-	if (!device->discipline->fill_info)
++	base = block->base;
++	if (!base->discipline->fill_info)
+ 		return -EINVAL;
+ 
+ 	dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL);
+ 	if (dasd_info == NULL)
+ 		return -ENOMEM;
+ 
+-	rc = device->discipline->fill_info(device, dasd_info);
++	rc = base->discipline->fill_info(base, dasd_info);
+ 	if (rc) {
+ 		kfree(dasd_info);
+ 		return rc;
+ 	}
+ 
+-	cdev = device->cdev;
++	cdev = base->cdev;
+ 	ccw_device_get_id(cdev, &dev_id);
+ 
+ 	dasd_info->devno = dev_id.devno;
+-	dasd_info->schid = _ccw_device_get_subchannel_number(device->cdev);
++	dasd_info->schid = _ccw_device_get_subchannel_number(base->cdev);
+ 	dasd_info->cu_type = cdev->id.cu_type;
+ 	dasd_info->cu_model = cdev->id.cu_model;
+ 	dasd_info->dev_type = cdev->id.dev_type;
+ 	dasd_info->dev_model = cdev->id.dev_model;
+-	dasd_info->status = device->state;
++	dasd_info->status = base->state;
+ 	/*
+ 	 * The open_count is increased for every opener, that includes
+ 	 * the blkdev_get in dasd_scan_partitions.
+ 	 * This must be hidden from user-space.
+ 	 */
+-	dasd_info->open_count = atomic_read(&device->open_count);
+-	if (!device->bdev)
++	dasd_info->open_count = atomic_read(&block->open_count);
++	if (!block->bdev)
+ 		dasd_info->open_count++;
+ 
+ 	/*
+ 	 * check if device is really formatted
+ 	 * LDL / CDL was returned by 'fill_info'
+ 	 */
+-	if ((device->state < DASD_STATE_READY) ||
+-	    (dasd_check_blocksize(device->bp_block)))
++	if ((base->state < DASD_STATE_READY) ||
++	    (dasd_check_blocksize(block->bp_block)))
+ 		dasd_info->format = DASD_FORMAT_NONE;
+ 
+ 	dasd_info->features |=
+-		((device->features & DASD_FEATURE_READONLY) != 0);
++		((base->features & DASD_FEATURE_READONLY) != 0);
+ 
+-	if (device->discipline)
+-		memcpy(dasd_info->type, device->discipline->name, 4);
++	if (base->discipline)
++		memcpy(dasd_info->type, base->discipline->name, 4);
+ 	else
+ 		memcpy(dasd_info->type, "none", 4);
+ 
+-	if (device->request_queue->request_fn) {
++	if (block->request_queue->request_fn) {
+ 		struct list_head *l;
+ #ifdef DASD_EXTENDED_PROFILING
+ 		{
+ 			struct list_head *l;
+-			spin_lock_irqsave(&device->lock, flags);
+-			list_for_each(l, &device->request_queue->queue_head)
++			spin_lock_irqsave(&block->lock, flags);
++			list_for_each(l, &block->request_queue->queue_head)
+ 				dasd_info->req_queue_len++;
+-			spin_unlock_irqrestore(&device->lock, flags);
++			spin_unlock_irqrestore(&block->lock, flags);
+ 		}
+ #endif				/* DASD_EXTENDED_PROFILING */
+-		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+-		list_for_each(l, &device->ccw_queue)
++		spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
++		list_for_each(l, &base->ccw_queue)
+ 			dasd_info->chanq_len++;
+-		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
++		spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
+ 				       flags);
+ 	}
+ 
+ 	rc = 0;
+ 	if (copy_to_user(argp, dasd_info,
+ 			 ((cmd == (unsigned int) BIODASDINFO2) ?
+-			  sizeof (struct dasd_information2_t) :
+-			  sizeof (struct dasd_information_t))))
++			  sizeof(struct dasd_information2_t) :
++			  sizeof(struct dasd_information_t))))
+ 		rc = -EFAULT;
+ 	kfree(dasd_info);
+ 	return rc;
+@@ -339,7 +336,7 @@ dasd_ioctl_information(struct dasd_device *device,
+ static int
+ dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp)
+ {
+-	struct dasd_device *device =  bdev->bd_disk->private_data;
++	struct dasd_block *block =  bdev->bd_disk->private_data;
+ 	int intval;
+ 
+ 	if (!capable(CAP_SYS_ADMIN))
+@@ -351,11 +348,10 @@ dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp)
+ 		return -EFAULT;
+ 
+ 	set_disk_ro(bdev->bd_disk, intval);
+-	return dasd_set_feature(device->cdev, DASD_FEATURE_READONLY, intval);
++	return dasd_set_feature(block->base->cdev, DASD_FEATURE_READONLY, intval);
+ }
+ 
+-static int
+-dasd_ioctl_readall_cmb(struct dasd_device *device, unsigned int cmd,
++static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd,
+ 		unsigned long arg)
+ {
+ 	struct cmbdata __user *argp = (void __user *) arg;
+@@ -363,7 +359,7 @@ dasd_ioctl_readall_cmb(struct dasd_device *device, unsigned int cmd,
+ 	struct cmbdata data;
+ 	int ret;
+ 
+-	ret = cmf_readall(device->cdev, &data);
++	ret = cmf_readall(block->base->cdev, &data);
+ 	if (!ret && copy_to_user(argp, &data, min(size, sizeof(*argp))))
+ 		return -EFAULT;
+ 	return ret;
+@@ -374,10 +370,10 @@ dasd_ioctl(struct inode *inode, struct file *file,
+ 	   unsigned int cmd, unsigned long arg)
+ {
+ 	struct block_device *bdev = inode->i_bdev;
+-	struct dasd_device *device = bdev->bd_disk->private_data;
++	struct dasd_block *block = bdev->bd_disk->private_data;
+ 	void __user *argp = (void __user *)arg;
+ 
+-	if (!device)
++	if (!block)
+                 return -ENODEV;
+ 
+ 	if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) {
+@@ -391,33 +387,33 @@ dasd_ioctl(struct inode *inode, struct file *file,
+ 	case BIODASDENABLE:
+ 		return dasd_ioctl_enable(bdev);
+ 	case BIODASDQUIESCE:
+-		return dasd_ioctl_quiesce(device);
++		return dasd_ioctl_quiesce(block);
+ 	case BIODASDRESUME:
+-		return dasd_ioctl_resume(device);
++		return dasd_ioctl_resume(block);
+ 	case BIODASDFMT:
+ 		return dasd_ioctl_format(bdev, argp);
+ 	case BIODASDINFO:
+-		return dasd_ioctl_information(device, cmd, argp);
++		return dasd_ioctl_information(block, cmd, argp);
+ 	case BIODASDINFO2:
+-		return dasd_ioctl_information(device, cmd, argp);
++		return dasd_ioctl_information(block, cmd, argp);
+ 	case BIODASDPRRD:
+-		return dasd_ioctl_read_profile(device, argp);
++		return dasd_ioctl_read_profile(block, argp);
+ 	case BIODASDPRRST:
+-		return dasd_ioctl_reset_profile(device);
++		return dasd_ioctl_reset_profile(block);
+ 	case BLKROSET:
+ 		return dasd_ioctl_set_ro(bdev, argp);
+ 	case DASDAPIVER:
+ 		return dasd_ioctl_api_version(argp);
+ 	case BIODASDCMFENABLE:
+-		return enable_cmf(device->cdev);
++		return enable_cmf(block->base->cdev);
+ 	case BIODASDCMFDISABLE:
+-		return disable_cmf(device->cdev);
++		return disable_cmf(block->base->cdev);
+ 	case BIODASDREADALLCMB:
+-		return dasd_ioctl_readall_cmb(device, cmd, arg);
++		return dasd_ioctl_readall_cmb(block, cmd, arg);
+ 	default:
+ 		/* if the discipline has an ioctl method try it. */
+-		if (device->discipline->ioctl) {
+-			int rval = device->discipline->ioctl(device, cmd, argp);
++		if (block->base->discipline->ioctl) {
++			int rval = block->base->discipline->ioctl(block, cmd, argp);
+ 			if (rval != -ENOIOCTLCMD)
+ 				return rval;
+ 		}
+diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
+index ac7e8ef..28a86f0 100644
+--- a/drivers/s390/block/dasd_proc.c
++++ b/drivers/s390/block/dasd_proc.c
+@@ -54,11 +54,16 @@ static int
+ dasd_devices_show(struct seq_file *m, void *v)
+ {
+ 	struct dasd_device *device;
++	struct dasd_block *block;
+ 	char *substr;
+ 
+ 	device = dasd_device_from_devindex((unsigned long) v - 1);
+ 	if (IS_ERR(device))
+ 		return 0;
++	if (device->block)
++		block = device->block;
++	else
++		return 0;
+ 	/* Print device number. */
+ 	seq_printf(m, "%s", device->cdev->dev.bus_id);
+ 	/* Print discipline string. */
+@@ -67,14 +72,14 @@ dasd_devices_show(struct seq_file *m, void *v)
+ 	else
+ 		seq_printf(m, "(none)");
+ 	/* Print kdev. */
+-	if (device->gdp)
++	if (block->gdp)
+ 		seq_printf(m, " at (%3d:%6d)",
+-			   device->gdp->major, device->gdp->first_minor);
++			   block->gdp->major, block->gdp->first_minor);
+ 	else
+ 		seq_printf(m, "  at (???:??????)");
+ 	/* Print device name. */
+-	if (device->gdp)
+-		seq_printf(m, " is %-8s", device->gdp->disk_name);
++	if (block->gdp)
++		seq_printf(m, " is %-8s", block->gdp->disk_name);
+ 	else
+ 		seq_printf(m, " is ????????");
+ 	/* Print devices features. */
+@@ -100,14 +105,14 @@ dasd_devices_show(struct seq_file *m, void *v)
+ 	case DASD_STATE_READY:
+ 	case DASD_STATE_ONLINE:
+ 		seq_printf(m, "active ");
+-		if (dasd_check_blocksize(device->bp_block))
++		if (dasd_check_blocksize(block->bp_block))
+ 			seq_printf(m, "n/f	 ");
+ 		else
+ 			seq_printf(m,
+ 				   "at blocksize: %d, %ld blocks, %ld MB",
+-				   device->bp_block, device->blocks,
+-				   ((device->bp_block >> 9) *
+-				    device->blocks) >> 11);
++				   block->bp_block, block->blocks,
++				   ((block->bp_block >> 9) *
++				    block->blocks) >> 11);
+ 		break;
+ 	default:
+ 		seq_printf(m, "no stat");
+@@ -137,7 +142,7 @@ static void dasd_devices_stop(struct seq_file *m, void *v)
+ {
+ }
+ 
+-static struct seq_operations dasd_devices_seq_ops = {
++static const struct seq_operations dasd_devices_seq_ops = {
+ 	.start		= dasd_devices_start,
+ 	.next		= dasd_devices_next,
+ 	.stop		= dasd_devices_stop,
+diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
+index 15a5789..7779bfc 100644
+--- a/drivers/s390/block/dcssblk.c
++++ b/drivers/s390/block/dcssblk.c
+@@ -82,7 +82,7 @@ struct dcssblk_dev_info {
+ 	struct request_queue *dcssblk_queue;
+ };
+ 
+-static struct list_head dcssblk_devices = LIST_HEAD_INIT(dcssblk_devices);
++static LIST_HEAD(dcssblk_devices);
+ static struct rw_semaphore dcssblk_devices_sem;
+ 
+ /*
+diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
+index 130de19..7e73e39 100644
+--- a/drivers/s390/char/Makefile
++++ b/drivers/s390/char/Makefile
+@@ -3,7 +3,7 @@
+ #
+ 
+ obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
+-	 sclp_info.o sclp_config.o sclp_chp.o
++	 sclp_cmd.o sclp_config.o sclp_cpi_sys.o
+ 
+ obj-$(CONFIG_TN3270) += raw3270.o
+ obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
+diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
+index 20442fb..a86c053 100644
+--- a/drivers/s390/char/monwriter.c
++++ b/drivers/s390/char/monwriter.c
+@@ -295,7 +295,7 @@ module_init(mon_init);
+ module_exit(mon_exit);
+ 
+ module_param_named(max_bufs, mon_max_bufs, int, 0644);
+-MODULE_PARM_DESC(max_bufs, "Maximum number of sample monitor data buffers"
++MODULE_PARM_DESC(max_bufs, "Maximum number of sample monitor data buffers "
+ 		 "that can be active at one time");
+ 
+ MODULE_AUTHOR("Melissa Howland <Melissa.Howland at us.ibm.com>");
+diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
+index 8d1c64a..0d98f1f 100644
+--- a/drivers/s390/char/raw3270.c
++++ b/drivers/s390/char/raw3270.c
+@@ -66,7 +66,7 @@ struct raw3270 {
+ static DEFINE_MUTEX(raw3270_mutex);
+ 
+ /* List of 3270 devices. */
+-static struct list_head raw3270_devices = LIST_HEAD_INIT(raw3270_devices);
++static LIST_HEAD(raw3270_devices);
+ 
+ /*
+  * Flag to indicate if the driver has been registered. Some operations
+@@ -1210,7 +1210,7 @@ struct raw3270_notifier {
+ 	void (*notifier)(int, int);
+ };
+ 
+-static struct list_head raw3270_notifier = LIST_HEAD_INIT(raw3270_notifier);
++static LIST_HEAD(raw3270_notifier);
+ 
+ int raw3270_register_notifier(void (*notifier)(int, int))
+ {
+diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
+index c7318a1..aa8186d 100644
+--- a/drivers/s390/char/sclp.h
++++ b/drivers/s390/char/sclp.h
+@@ -56,8 +56,6 @@ typedef unsigned int sclp_cmdw_t;
+ #define SCLP_CMDW_READ_EVENT_DATA	0x00770005
+ #define SCLP_CMDW_WRITE_EVENT_DATA	0x00760005
+ #define SCLP_CMDW_WRITE_EVENT_MASK	0x00780005
+-#define SCLP_CMDW_READ_SCP_INFO		0x00020001
+-#define SCLP_CMDW_READ_SCP_INFO_FORCED	0x00120001
+ 
+ #define GDS_ID_MDSMU		0x1310
+ #define GDS_ID_MDSROUTEINFO	0x1311
+@@ -83,6 +81,8 @@ extern u64 sclp_facilities;
+ 
+ #define SCLP_HAS_CHP_INFO	(sclp_facilities & 0x8000000000000000ULL)
+ #define SCLP_HAS_CHP_RECONFIG	(sclp_facilities & 0x2000000000000000ULL)
++#define SCLP_HAS_CPU_INFO	(sclp_facilities & 0x0800000000000000ULL)
++#define SCLP_HAS_CPU_RECONFIG	(sclp_facilities & 0x0400000000000000ULL)
+ 
+ struct gds_subvector {
+ 	u8	length;
+diff --git a/drivers/s390/char/sclp_chp.c b/drivers/s390/char/sclp_chp.c
+deleted file mode 100644
+index c68f5e7..0000000
+--- a/drivers/s390/char/sclp_chp.c
++++ /dev/null
+@@ -1,200 +0,0 @@
+-/*
+- *  drivers/s390/char/sclp_chp.c
+- *
+- *    Copyright IBM Corp. 2007
+- *    Author(s): Peter Oberparleiter <peter.oberparleiter at de.ibm.com>
+- */
+-
+-#include <linux/types.h>
+-#include <linux/gfp.h>
+-#include <linux/errno.h>
+-#include <linux/completion.h>
+-#include <asm/sclp.h>
+-#include <asm/chpid.h>
+-
+-#include "sclp.h"
+-
+-#define TAG	"sclp_chp: "
+-
+-#define SCLP_CMDW_CONFIGURE_CHANNEL_PATH	0x000f0001
+-#define SCLP_CMDW_DECONFIGURE_CHANNEL_PATH	0x000e0001
+-#define SCLP_CMDW_READ_CHANNEL_PATH_INFORMATION	0x00030001
+-
+-static inline sclp_cmdw_t get_configure_cmdw(struct chp_id chpid)
+-{
+-	return SCLP_CMDW_CONFIGURE_CHANNEL_PATH | chpid.id << 8;
+-}
+-
+-static inline sclp_cmdw_t get_deconfigure_cmdw(struct chp_id chpid)
+-{
+-	return SCLP_CMDW_DECONFIGURE_CHANNEL_PATH | chpid.id << 8;
+-}
+-
+-static void chp_callback(struct sclp_req *req, void *data)
+-{
+-	struct completion *completion = data;
+-
+-	complete(completion);
+-}
+-
+-struct chp_cfg_sccb {
+-	struct sccb_header header;
+-	u8 ccm;
+-	u8 reserved[6];
+-	u8 cssid;
+-} __attribute__((packed));
+-
+-struct chp_cfg_data {
+-	struct chp_cfg_sccb sccb;
+-	struct sclp_req req;
+-	struct completion completion;
+-} __attribute__((packed));
+-
+-static int do_configure(sclp_cmdw_t cmd)
+-{
+-	struct chp_cfg_data *data;
+-	int rc;
+-
+-	if (!SCLP_HAS_CHP_RECONFIG)
+-		return -EOPNOTSUPP;
+-	/* Prepare sccb. */
+-	data = (struct chp_cfg_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+-	if (!data)
+-		return -ENOMEM;
+-	data->sccb.header.length = sizeof(struct chp_cfg_sccb);
+-	data->req.command = cmd;
+-	data->req.sccb = &(data->sccb);
+-	data->req.status = SCLP_REQ_FILLED;
+-	data->req.callback = chp_callback;
+-	data->req.callback_data = &(data->completion);
+-	init_completion(&data->completion);
+-
+-	/* Perform sclp request. */
+-	rc = sclp_add_request(&(data->req));
+-	if (rc)
+-		goto out;
+-	wait_for_completion(&data->completion);
+-
+-	/* Check response .*/
+-	if (data->req.status != SCLP_REQ_DONE) {
+-		printk(KERN_WARNING TAG "configure channel-path request failed "
+-		       "(status=0x%02x)\n", data->req.status);
+-		rc = -EIO;
+-		goto out;
+-	}
+-	switch (data->sccb.header.response_code) {
+-	case 0x0020:
+-	case 0x0120:
+-	case 0x0440:
+-	case 0x0450:
+-		break;
+-	default:
+-		printk(KERN_WARNING TAG "configure channel-path failed "
+-		       "(cmd=0x%08x, response=0x%04x)\n", cmd,
+-		       data->sccb.header.response_code);
+-		rc = -EIO;
+-		break;
+-	}
+-out:
+-	free_page((unsigned long) data);
+-
+-	return rc;
+-}
+-
+-/**
+- * sclp_chp_configure - perform configure channel-path sclp command
+- * @chpid: channel-path ID
+- *
+- * Perform configure channel-path command sclp command for specified chpid.
+- * Return 0 after command successfully finished, non-zero otherwise.
+- */
+-int sclp_chp_configure(struct chp_id chpid)
+-{
+-	return do_configure(get_configure_cmdw(chpid));
+-}
+-
+-/**
+- * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
+- * @chpid: channel-path ID
+- *
+- * Perform deconfigure channel-path command sclp command for specified chpid
+- * and wait for completion. On success return 0. Return non-zero otherwise.
+- */
+-int sclp_chp_deconfigure(struct chp_id chpid)
+-{
+-	return do_configure(get_deconfigure_cmdw(chpid));
+-}
+-
+-struct chp_info_sccb {
+-	struct sccb_header header;
+-	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
+-	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
+-	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
+-	u8 ccm;
+-	u8 reserved[6];
+-	u8 cssid;
+-} __attribute__((packed));
+-
+-struct chp_info_data {
+-	struct chp_info_sccb sccb;
+-	struct sclp_req req;
+-	struct completion completion;
+-} __attribute__((packed));
+-
+-/**
+- * sclp_chp_read_info - perform read channel-path information sclp command
+- * @info: resulting channel-path information data
+- *
+- * Perform read channel-path information sclp command and wait for completion.
+- * On success, store channel-path information in @info and return 0. Return
+- * non-zero otherwise.
+- */
+-int sclp_chp_read_info(struct sclp_chp_info *info)
+-{
+-	struct chp_info_data *data;
+-	int rc;
+-
+-	if (!SCLP_HAS_CHP_INFO)
+-		return -EOPNOTSUPP;
+-	/* Prepare sccb. */
+-	data = (struct chp_info_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+-	if (!data)
+-		return -ENOMEM;
+-	data->sccb.header.length = sizeof(struct chp_info_sccb);
+-	data->req.command = SCLP_CMDW_READ_CHANNEL_PATH_INFORMATION;
+-	data->req.sccb = &(data->sccb);
+-	data->req.status = SCLP_REQ_FILLED;
+-	data->req.callback = chp_callback;
+-	data->req.callback_data = &(data->completion);
+-	init_completion(&data->completion);
+-
+-	/* Perform sclp request. */
+-	rc = sclp_add_request(&(data->req));
+-	if (rc)
+-		goto out;
+-	wait_for_completion(&data->completion);
+-
+-	/* Check response .*/
+-	if (data->req.status != SCLP_REQ_DONE) {
+-		printk(KERN_WARNING TAG "read channel-path info request failed "
+-		       "(status=0x%02x)\n", data->req.status);
+-		rc = -EIO;
+-		goto out;
+-	}
+-	if (data->sccb.header.response_code != 0x0010) {
+-		printk(KERN_WARNING TAG "read channel-path info failed "
+-		       "(response=0x%04x)\n", data->sccb.header.response_code);
+-		rc = -EIO;
+-		goto out;
+-	}
+-	memcpy(info->recognized, data->sccb.recognized,
+-	       SCLP_CHP_INFO_MASK_SIZE);
+-	memcpy(info->standby, data->sccb.standby,
+-	       SCLP_CHP_INFO_MASK_SIZE);
+-	memcpy(info->configured, data->sccb.configured,
+-	       SCLP_CHP_INFO_MASK_SIZE);
+-out:
+-	free_page((unsigned long) data);
+-
+-	return rc;
+-}
+diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
+new file mode 100644
+index 0000000..b5c2339
+--- /dev/null
++++ b/drivers/s390/char/sclp_cmd.c
+@@ -0,0 +1,398 @@
++/*
++ *  drivers/s390/char/sclp_cmd.c
++ *
++ *    Copyright IBM Corp. 2007
++ *    Author(s): Heiko Carstens <heiko.carstens at de.ibm.com>,
++ *		 Peter Oberparleiter <peter.oberparleiter at de.ibm.com>
++ */
++
++#include <linux/completion.h>
++#include <linux/init.h>
++#include <linux/errno.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <asm/chpid.h>
++#include <asm/sclp.h>
++#include "sclp.h"
++
++#define TAG	"sclp_cmd: "
++
++#define SCLP_CMDW_READ_SCP_INFO		0x00020001
++#define SCLP_CMDW_READ_SCP_INFO_FORCED	0x00120001
++
++struct read_info_sccb {
++	struct	sccb_header header;	/* 0-7 */
++	u16	rnmax;			/* 8-9 */
++	u8	rnsize;			/* 10 */
++	u8	_reserved0[24 - 11];	/* 11-15 */
++	u8	loadparm[8];		/* 24-31 */
++	u8	_reserved1[48 - 32];	/* 32-47 */
++	u64	facilities;		/* 48-55 */
++	u8	_reserved2[84 - 56];	/* 56-83 */
++	u8	fac84;			/* 84 */
++	u8	_reserved3[91 - 85];	/* 85-90 */
++	u8	flags;			/* 91 */
++	u8	_reserved4[100 - 92];	/* 92-99 */
++	u32	rnsize2;		/* 100-103 */
++	u64	rnmax2;			/* 104-111 */
++	u8	_reserved5[4096 - 112];	/* 112-4095 */
++} __attribute__((packed, aligned(PAGE_SIZE)));
++
++static struct read_info_sccb __initdata early_read_info_sccb;
++static int __initdata early_read_info_sccb_valid;
++
++u64 sclp_facilities;
++static u8 sclp_fac84;
++
++static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
++{
++	int rc;
++
++	__ctl_set_bit(0, 9);
++	rc = sclp_service_call(cmd, sccb);
++	if (rc)
++		goto out;
++	__load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT |
++			PSW_MASK_WAIT | PSW_DEFAULT_KEY);
++	local_irq_disable();
++out:
++	/* Contents of the sccb might have changed. */
++	barrier();
++	__ctl_clear_bit(0, 9);
++	return rc;
++}
++
++void __init sclp_read_info_early(void)
++{
++	int rc;
++	int i;
++	struct read_info_sccb *sccb;
++	sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
++				  SCLP_CMDW_READ_SCP_INFO};
++
++	sccb = &early_read_info_sccb;
++	for (i = 0; i < ARRAY_SIZE(commands); i++) {
++		do {
++			memset(sccb, 0, sizeof(*sccb));
++			sccb->header.length = sizeof(*sccb);
++			sccb->header.control_mask[2] = 0x80;
++			rc = sclp_cmd_sync_early(commands[i], sccb);
++		} while (rc == -EBUSY);
++
++		if (rc)
++			break;
++		if (sccb->header.response_code == 0x10) {
++			early_read_info_sccb_valid = 1;
++			break;
++		}
++		if (sccb->header.response_code != 0x1f0)
++			break;
++	}
++}
++
++void __init sclp_facilities_detect(void)
++{
++	if (!early_read_info_sccb_valid)
++		return;
++	sclp_facilities = early_read_info_sccb.facilities;
++	sclp_fac84 = early_read_info_sccb.fac84;
++}
++
++unsigned long long __init sclp_memory_detect(void)
++{
++	unsigned long long memsize;
++	struct read_info_sccb *sccb;
++
++	if (!early_read_info_sccb_valid)
++		return 0;
++	sccb = &early_read_info_sccb;
++	if (sccb->rnsize)
++		memsize = sccb->rnsize << 20;
++	else
++		memsize = sccb->rnsize2 << 20;
++	if (sccb->rnmax)
++		memsize *= sccb->rnmax;
++	else
++		memsize *= sccb->rnmax2;
++	return memsize;
++}
++
++/*
++ * This function will be called after sclp_memory_detect(), which gets called
++ * early from early.c code. Therefore the sccb should have valid contents.
++ */
++void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
++{
++	struct read_info_sccb *sccb;
++
++	if (!early_read_info_sccb_valid)
++		return;
++	sccb = &early_read_info_sccb;
++	info->is_valid = 1;
++	if (sccb->flags & 0x2)
++		info->has_dump = 1;
++	memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
++}
++
++static void sclp_sync_callback(struct sclp_req *req, void *data)
++{
++	struct completion *completion = data;
++
++	complete(completion);
++}
++
++static int do_sync_request(sclp_cmdw_t cmd, void *sccb)
++{
++	struct completion completion;
++	struct sclp_req *request;
++	int rc;
++
++	request = kzalloc(sizeof(*request), GFP_KERNEL);
++	if (!request)
++		return -ENOMEM;
++	request->command = cmd;
++	request->sccb = sccb;
++	request->status = SCLP_REQ_FILLED;
++	request->callback = sclp_sync_callback;
++	request->callback_data = &completion;
++	init_completion(&completion);
++
++	/* Perform sclp request. */
++	rc = sclp_add_request(request);
++	if (rc)
++		goto out;
++	wait_for_completion(&completion);
++
++	/* Check response. */
++	if (request->status != SCLP_REQ_DONE) {
++		printk(KERN_WARNING TAG "sync request failed "
++		       "(cmd=0x%08x, status=0x%02x)\n", cmd, request->status);
++		rc = -EIO;
++	}
++out:
++	kfree(request);
++	return rc;
++}
++
++/*
++ * CPU configuration related functions.
++ */
++
++#define SCLP_CMDW_READ_CPU_INFO		0x00010001
++#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
++#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001
++
++struct read_cpu_info_sccb {
++	struct	sccb_header header;
++	u16	nr_configured;
++	u16	offset_configured;
++	u16	nr_standby;
++	u16	offset_standby;
++	u8	reserved[4096 - 16];
++} __attribute__((packed, aligned(PAGE_SIZE)));
++
++static void sclp_fill_cpu_info(struct sclp_cpu_info *info,
++			       struct read_cpu_info_sccb *sccb)
++{
++	char *page = (char *) sccb;
++
++	memset(info, 0, sizeof(*info));
++	info->configured = sccb->nr_configured;
++	info->standby = sccb->nr_standby;
++	info->combined = sccb->nr_configured + sccb->nr_standby;
++	info->has_cpu_type = sclp_fac84 & 0x1;
++	memcpy(&info->cpu, page + sccb->offset_configured,
++	       info->combined * sizeof(struct sclp_cpu_entry));
++}
++
++int sclp_get_cpu_info(struct sclp_cpu_info *info)
++{
++	int rc;
++	struct read_cpu_info_sccb *sccb;
++
++	if (!SCLP_HAS_CPU_INFO)
++		return -EOPNOTSUPP;
++	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
++	if (!sccb)
++		return -ENOMEM;
++	sccb->header.length = sizeof(*sccb);
++	rc = do_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb);
++	if (rc)
++		goto out;
++	if (sccb->header.response_code != 0x0010) {
++		printk(KERN_WARNING TAG "readcpuinfo failed "
++		       "(response=0x%04x)\n", sccb->header.response_code);
++		rc = -EIO;
++		goto out;
++	}
++	sclp_fill_cpu_info(info, sccb);
++out:
++	free_page((unsigned long) sccb);
++	return rc;
++}
++
++struct cpu_configure_sccb {
++	struct sccb_header header;
++} __attribute__((packed, aligned(8)));
++
++static int do_cpu_configure(sclp_cmdw_t cmd)
++{
++	struct cpu_configure_sccb *sccb;
++	int rc;
++
++	if (!SCLP_HAS_CPU_RECONFIG)
++		return -EOPNOTSUPP;
++	/*
++	 * This is not going to cross a page boundary since we force
++	 * kmalloc to have a minimum alignment of 8 bytes on s390.
++	 */
++	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
++	if (!sccb)
++		return -ENOMEM;
++	sccb->header.length = sizeof(*sccb);
++	rc = do_sync_request(cmd, sccb);
++	if (rc)
++		goto out;
++	switch (sccb->header.response_code) {
++	case 0x0020:
++	case 0x0120:
++		break;
++	default:
++		printk(KERN_WARNING TAG "configure cpu failed (cmd=0x%08x, "
++		       "response=0x%04x)\n", cmd, sccb->header.response_code);
++		rc = -EIO;
++		break;
++	}
++out:
++	kfree(sccb);
++	return rc;
++}
++
++int sclp_cpu_configure(u8 cpu)
++{
++	return do_cpu_configure(SCLP_CMDW_CONFIGURE_CPU | cpu << 8);
++}
++
++int sclp_cpu_deconfigure(u8 cpu)
++{
++	return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
++}
++
++/*
++ * Channel path configuration related functions.
++ */
++
++#define SCLP_CMDW_CONFIGURE_CHPATH		0x000f0001
++#define SCLP_CMDW_DECONFIGURE_CHPATH		0x000e0001
++#define SCLP_CMDW_READ_CHPATH_INFORMATION	0x00030001
++
++struct chp_cfg_sccb {
++	struct sccb_header header;
++	u8 ccm;
++	u8 reserved[6];
++	u8 cssid;
++} __attribute__((packed));
++
++static int do_chp_configure(sclp_cmdw_t cmd)
++{
++	struct chp_cfg_sccb *sccb;
++	int rc;
++
++	if (!SCLP_HAS_CHP_RECONFIG)
++		return -EOPNOTSUPP;
++	/* Prepare sccb. */
++	sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
++	if (!sccb)
++		return -ENOMEM;
++	sccb->header.length = sizeof(*sccb);
++	rc = do_sync_request(cmd, sccb);
++	if (rc)
++		goto out;
++	switch (sccb->header.response_code) {
++	case 0x0020:
++	case 0x0120:
++	case 0x0440:
++	case 0x0450:
++		break;
++	default:
++		printk(KERN_WARNING TAG "configure channel-path failed "
++		       "(cmd=0x%08x, response=0x%04x)\n", cmd,
++		       sccb->header.response_code);
++		rc = -EIO;
++		break;
++	}
++out:
++	free_page((unsigned long) sccb);
++	return rc;
++}
++
++/**
++ * sclp_chp_configure - perform configure channel-path sclp command
++ * @chpid: channel-path ID
++ *
++ * Perform configure channel-path command sclp command for specified chpid.
++ * Return 0 after command successfully finished, non-zero otherwise.
++ */
++int sclp_chp_configure(struct chp_id chpid)
++{
++	return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
++}
++
++/**
++ * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
++ * @chpid: channel-path ID
++ *
++ * Perform deconfigure channel-path command sclp command for specified chpid
++ * and wait for completion. On success return 0. Return non-zero otherwise.
++ */
++int sclp_chp_deconfigure(struct chp_id chpid)
++{
++	return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
++}
++
++struct chp_info_sccb {
++	struct sccb_header header;
++	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
++	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
++	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
++	u8 ccm;
++	u8 reserved[6];
++	u8 cssid;
++} __attribute__((packed));
++
++/**
++ * sclp_chp_read_info - perform read channel-path information sclp command
++ * @info: resulting channel-path information data
++ *
++ * Perform read channel-path information sclp command and wait for completion.
++ * On success, store channel-path information in @info and return 0. Return
++ * non-zero otherwise.
++ */
++int sclp_chp_read_info(struct sclp_chp_info *info)
++{
++	struct chp_info_sccb *sccb;
++	int rc;
++
++	if (!SCLP_HAS_CHP_INFO)
++		return -EOPNOTSUPP;
++	/* Prepare sccb. */
++	sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
++	if (!sccb)
++		return -ENOMEM;
++	sccb->header.length = sizeof(*sccb);
++	rc = do_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
++	if (rc)
++		goto out;
++	if (sccb->header.response_code != 0x0010) {
++		printk(KERN_WARNING TAG "read channel-path info failed "
++		       "(response=0x%04x)\n", sccb->header.response_code);
++		rc = -EIO;
++		goto out;
++	}
++	memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
++	memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
++	memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
++out:
++	free_page((unsigned long) sccb);
++	return rc;
++}
+diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
+index 5322e5e..9dc77f1 100644
+--- a/drivers/s390/char/sclp_config.c
++++ b/drivers/s390/char/sclp_config.c
+@@ -29,12 +29,12 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
+ 	struct sys_device *sysdev;
+ 
+ 	printk(KERN_WARNING TAG "cpu capability changed.\n");
+-	lock_cpu_hotplug();
++	get_online_cpus();
+ 	for_each_online_cpu(cpu) {
+ 		sysdev = get_cpu_sysdev(cpu);
+ 		kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
+ 	}
+-	unlock_cpu_hotplug();
++	put_online_cpus();
+ }
+ 
+ static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
+diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c
+index 82a13d9..5716487 100644
+--- a/drivers/s390/char/sclp_cpi.c
++++ b/drivers/s390/char/sclp_cpi.c
+@@ -1,255 +1,41 @@
+ /*
+- * Author: Martin Peschke <mpeschke at de.ibm.com>
+- * Copyright (C) 2001 IBM Entwicklung GmbH, IBM Corporation
++ *  drivers/s390/char/sclp_cpi.c
++ *    SCLP control programm identification
+  *
+- * SCLP Control-Program Identification.
++ *    Copyright IBM Corp. 2001, 2007
++ *    Author(s): Martin Peschke <mpeschke at de.ibm.com>
++ *		 Michael Ernst <mernst at de.ibm.com>
+  */
+ 
+-#include <linux/version.h>
+ #include <linux/kmod.h>
+ #include <linux/module.h>
+ #include <linux/moduleparam.h>
+-#include <linux/init.h>
+-#include <linux/timer.h>
+-#include <linux/string.h>
+-#include <linux/err.h>
+-#include <linux/slab.h>
+-#include <asm/ebcdic.h>
+-#include <asm/semaphore.h>
+-
+-#include "sclp.h"
+-#include "sclp_rw.h"
+-
+-#define CPI_LENGTH_SYSTEM_TYPE	8
+-#define CPI_LENGTH_SYSTEM_NAME	8
+-#define CPI_LENGTH_SYSPLEX_NAME	8
+-
+-struct cpi_evbuf {
+-	struct evbuf_header header;
+-	u8	id_format;
+-	u8	reserved0;
+-	u8	system_type[CPI_LENGTH_SYSTEM_TYPE];
+-	u64	reserved1;
+-	u8	system_name[CPI_LENGTH_SYSTEM_NAME];
+-	u64	reserved2;
+-	u64	system_level;
+-	u64	reserved3;
+-	u8	sysplex_name[CPI_LENGTH_SYSPLEX_NAME];
+-	u8	reserved4[16];
+-} __attribute__((packed));
+-
+-struct cpi_sccb {
+-	struct sccb_header header;
+-	struct cpi_evbuf cpi_evbuf;
+-} __attribute__((packed));
+-
+-/* Event type structure for write message and write priority message */
+-static struct sclp_register sclp_cpi_event =
+-{
+-	.send_mask = EVTYP_CTLPROGIDENT_MASK
+-};
++#include <linux/version.h>
++#include "sclp_cpi_sys.h"
+ 
+ MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Identify this operating system instance "
++		   "to the System z hardware");
++MODULE_AUTHOR("Martin Peschke <mpeschke at de.ibm.com>, "
++	      "Michael Ernst <mernst at de.ibm.com>");
+ 
+-MODULE_AUTHOR(
+-	"Martin Peschke, IBM Deutschland Entwicklung GmbH "
+-	"<mpeschke at de.ibm.com>");
+-
+-MODULE_DESCRIPTION(
+-	"identify this operating system instance to the S/390 "
+-	"or zSeries hardware");
++static char *system_name = "";
++static char *sysplex_name = "";
+ 
+-static char *system_name = NULL;
+ module_param(system_name, charp, 0);
+ MODULE_PARM_DESC(system_name, "e.g. hostname - max. 8 characters");
+-
+-static char *sysplex_name = NULL;
+-#ifdef ALLOW_SYSPLEX_NAME
+ module_param(sysplex_name, charp, 0);
+ MODULE_PARM_DESC(sysplex_name, "if applicable - max. 8 characters");
+-#endif
+-
+-/* use default value for this field (as well as for system level) */
+-static char *system_type = "LINUX";
+ 
+-static int
+-cpi_check_parms(void)
++static int __init cpi_module_init(void)
+ {
+-	/* reject if no system type specified */
+-	if (!system_type) {
+-		printk("cpi: bug: no system type specified\n");
+-		return -EINVAL;
+-	}
+-
+-	/* reject if system type larger than 8 characters */
+-	if (strlen(system_type) > CPI_LENGTH_SYSTEM_NAME) {
+-		printk("cpi: bug: system type has length of %li characters - "
+-		       "only %i characters supported\n",
+-		       strlen(system_type), CPI_LENGTH_SYSTEM_TYPE);
+-		return -EINVAL;
+-	}
+-
+-	/* reject if no system name specified */
+-	if (!system_name) {
+-		printk("cpi: no system name specified\n");
+-		return -EINVAL;
+-	}
+-
+-	/* reject if system name larger than 8 characters */
+-	if (strlen(system_name) > CPI_LENGTH_SYSTEM_NAME) {
+-		printk("cpi: system name has length of %li characters - "
+-		       "only %i characters supported\n",
+-		       strlen(system_name), CPI_LENGTH_SYSTEM_NAME);
+-		return -EINVAL;
+-	}
+-
+-	/* reject if specified sysplex name larger than 8 characters */
+-	if (sysplex_name && strlen(sysplex_name) > CPI_LENGTH_SYSPLEX_NAME) {
+-		printk("cpi: sysplex name has length of %li characters"
+-		       " - only %i characters supported\n",
+-		       strlen(sysplex_name), CPI_LENGTH_SYSPLEX_NAME);
+-		return -EINVAL;
+-	}
+-	return 0;
++	return sclp_cpi_set_data(system_name, sysplex_name, "LINUX",
++				 LINUX_VERSION_CODE);
+ }
+ 
+-static void
+-cpi_callback(struct sclp_req *req, void *data)
+-{
+-	struct semaphore *sem;
+-
+-	sem = (struct semaphore *) data;
+-	up(sem);
+-}
+-
+-static struct sclp_req *
+-cpi_prepare_req(void)
+-{
+-	struct sclp_req *req;
+-	struct cpi_sccb *sccb;
+-	struct cpi_evbuf *evb;
+-
+-	req = kmalloc(sizeof(struct sclp_req), GFP_KERNEL);
+-	if (req == NULL)
+-		return ERR_PTR(-ENOMEM);
+-	sccb = (struct cpi_sccb *) __get_free_page(GFP_KERNEL | GFP_DMA);
+-	if (sccb == NULL) {
+-		kfree(req);
+-		return ERR_PTR(-ENOMEM);
+-	}
+-	memset(sccb, 0, sizeof(struct cpi_sccb));
+-
+-	/* setup SCCB for Control-Program Identification */
+-	sccb->header.length = sizeof(struct cpi_sccb);
+-	sccb->cpi_evbuf.header.length = sizeof(struct cpi_evbuf);
+-	sccb->cpi_evbuf.header.type = 0x0B;
+-	evb = &sccb->cpi_evbuf;
+-
+-	/* set system type */
+-	memset(evb->system_type, ' ', CPI_LENGTH_SYSTEM_TYPE);
+-	memcpy(evb->system_type, system_type, strlen(system_type));
+-	sclp_ascebc_str(evb->system_type, CPI_LENGTH_SYSTEM_TYPE);
+-	EBC_TOUPPER(evb->system_type, CPI_LENGTH_SYSTEM_TYPE);
+-
+-	/* set system name */
+-	memset(evb->system_name, ' ', CPI_LENGTH_SYSTEM_NAME);
+-	memcpy(evb->system_name, system_name, strlen(system_name));
+-	sclp_ascebc_str(evb->system_name, CPI_LENGTH_SYSTEM_NAME);
+-	EBC_TOUPPER(evb->system_name, CPI_LENGTH_SYSTEM_NAME);
+-
+-	/* set system level */
+-	evb->system_level = LINUX_VERSION_CODE;
+-
+-	/* set sysplex name */
+-	if (sysplex_name) {
+-		memset(evb->sysplex_name, ' ', CPI_LENGTH_SYSPLEX_NAME);
+-		memcpy(evb->sysplex_name, sysplex_name, strlen(sysplex_name));
+-		sclp_ascebc_str(evb->sysplex_name, CPI_LENGTH_SYSPLEX_NAME);
+-		EBC_TOUPPER(evb->sysplex_name, CPI_LENGTH_SYSPLEX_NAME);
+-	}
+-
+-	/* prepare request data structure presented to SCLP driver */
+-	req->command = SCLP_CMDW_WRITE_EVENT_DATA;
+-	req->sccb = sccb;
+-	req->status = SCLP_REQ_FILLED;
+-	req->callback = cpi_callback;
+-	return req;
+-}
+-
+-static void
+-cpi_free_req(struct sclp_req *req)
+-{
+-	free_page((unsigned long) req->sccb);
+-	kfree(req);
+-}
+-
+-static int __init
+-cpi_module_init(void)
+-{
+-	struct semaphore sem;
+-	struct sclp_req *req;
+-	int rc;
+-
+-	rc = cpi_check_parms();
+-	if (rc)
+-		return rc;
+-
+-	rc = sclp_register(&sclp_cpi_event);
+-	if (rc) {
+-		/* could not register sclp event. Die. */
+-		printk(KERN_WARNING "cpi: could not register to hardware "
+-		       "console.\n");
+-		return -EINVAL;
+-	}
+-	if (!(sclp_cpi_event.sclp_send_mask & EVTYP_CTLPROGIDENT_MASK)) {
+-		printk(KERN_WARNING "cpi: no control program identification "
+-		       "support\n");
+-		sclp_unregister(&sclp_cpi_event);
+-		return -EOPNOTSUPP;
+-	}
+-
+-	req = cpi_prepare_req();
+-	if (IS_ERR(req)) {
+-		printk(KERN_WARNING "cpi: couldn't allocate request\n");
+-		sclp_unregister(&sclp_cpi_event);
+-		return PTR_ERR(req);
+-	}
+-
+-	/* Prepare semaphore */
+-	sema_init(&sem, 0);
+-	req->callback_data = &sem;
+-	/* Add request to sclp queue */
+-	rc = sclp_add_request(req);
+-	if (rc) {
+-		printk(KERN_WARNING "cpi: could not start request\n");
+-		cpi_free_req(req);
+-		sclp_unregister(&sclp_cpi_event);
+-		return rc;
+-	}
+-	/* make "insmod" sleep until callback arrives */
+-	down(&sem);
+-
+-	rc = ((struct cpi_sccb *) req->sccb)->header.response_code;
+-	if (rc != 0x0020) {
+-		printk(KERN_WARNING "cpi: failed with response code 0x%x\n",
+-		       rc);
+-		rc = -ECOMM;
+-	} else
+-		rc = 0;
+-
+-	cpi_free_req(req);
+-	sclp_unregister(&sclp_cpi_event);
+-
+-	return rc;
+-}
+-
+-
+ static void __exit cpi_module_exit(void)
+ {
+ }
+ 
+-
+-/* declare driver module init/cleanup functions */
+ module_init(cpi_module_init);
+ module_exit(cpi_module_exit);
+-
+diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c
+new file mode 100644
+index 0000000..4161703
+--- /dev/null
++++ b/drivers/s390/char/sclp_cpi_sys.c
+@@ -0,0 +1,400 @@
++/*
++ *  drivers/s390/char/sclp_cpi_sys.c
++ *    SCLP control program identification sysfs interface
++ *
++ *    Copyright IBM Corp. 2001, 2007
++ *    Author(s): Martin Peschke <mpeschke at de.ibm.com>
++ *		 Michael Ernst <mernst at de.ibm.com>
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/stat.h>
++#include <linux/device.h>
++#include <linux/string.h>
++#include <linux/ctype.h>
++#include <linux/kmod.h>
++#include <linux/timer.h>
++#include <linux/err.h>
++#include <linux/slab.h>
++#include <linux/completion.h>
++#include <asm/ebcdic.h>
++#include <asm/sclp.h>
++#include "sclp.h"
++#include "sclp_rw.h"
++#include "sclp_cpi_sys.h"
++
++#define CPI_LENGTH_NAME 8
++#define CPI_LENGTH_LEVEL 16
++
++struct cpi_evbuf {
++	struct evbuf_header header;
++	u8	id_format;
++	u8	reserved0;
++	u8	system_type[CPI_LENGTH_NAME];
++	u64	reserved1;
++	u8	system_name[CPI_LENGTH_NAME];
++	u64	reserved2;
++	u64	system_level;
++	u64	reserved3;
++	u8	sysplex_name[CPI_LENGTH_NAME];
++	u8	reserved4[16];
++} __attribute__((packed));
++
++struct cpi_sccb {
++	struct sccb_header header;
++	struct cpi_evbuf cpi_evbuf;
++} __attribute__((packed));
++
++static struct sclp_register sclp_cpi_event = {
++	.send_mask = EVTYP_CTLPROGIDENT_MASK,
++};
++
++static char system_name[CPI_LENGTH_NAME + 1];
++static char sysplex_name[CPI_LENGTH_NAME + 1];
++static char system_type[CPI_LENGTH_NAME + 1];
++static u64 system_level;
++
++static void set_data(char *field, char *data)
++{
++	memset(field, ' ', CPI_LENGTH_NAME);
++	memcpy(field, data, strlen(data));
++	sclp_ascebc_str(field, CPI_LENGTH_NAME);
++}
++
++static void cpi_callback(struct sclp_req *req, void *data)
++{
++	struct completion *completion = data;
++
++	complete(completion);
++}
++
++static struct sclp_req *cpi_prepare_req(void)
++{
++	struct sclp_req *req;
++	struct cpi_sccb *sccb;
++	struct cpi_evbuf *evb;
++
++	req = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
++	if (!req)
++		return ERR_PTR(-ENOMEM);
++	sccb = (struct cpi_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
++	if (!sccb) {
++		kfree(req);
++		return ERR_PTR(-ENOMEM);
++	}
++
++	/* setup SCCB for Control-Program Identification */
++	sccb->header.length = sizeof(struct cpi_sccb);
++	sccb->cpi_evbuf.header.length = sizeof(struct cpi_evbuf);
++	sccb->cpi_evbuf.header.type = 0x0b;
++	evb = &sccb->cpi_evbuf;
++
++	/* set system type */
++	set_data(evb->system_type, system_type);
++
++	/* set system name */
++	set_data(evb->system_name, system_name);
++
++	/* set sytem level */
++	evb->system_level = system_level;
++
++	/* set sysplex name */
++	set_data(evb->sysplex_name, sysplex_name);
++
++	/* prepare request data structure presented to SCLP driver */
++	req->command = SCLP_CMDW_WRITE_EVENT_DATA;
++	req->sccb = sccb;
++	req->status = SCLP_REQ_FILLED;
++	req->callback = cpi_callback;
++	return req;
++}
++
++static void cpi_free_req(struct sclp_req *req)
++{
++	free_page((unsigned long) req->sccb);
++	kfree(req);
++}
++
++static int cpi_req(void)
++{
++	struct completion completion;
++	struct sclp_req *req;
++	int rc;
++	int response;
++
++	rc = sclp_register(&sclp_cpi_event);
++	if (rc) {
++		printk(KERN_WARNING "cpi: could not register "
++			"to hardware console.\n");
++		goto out;
++	}
++	if (!(sclp_cpi_event.sclp_send_mask & EVTYP_CTLPROGIDENT_MASK)) {
++		printk(KERN_WARNING "cpi: no control program "
++			"identification support\n");
++		rc = -EOPNOTSUPP;
++		goto out_unregister;
++	}
++
++	req = cpi_prepare_req();
++	if (IS_ERR(req)) {
++		printk(KERN_WARNING "cpi: could not allocate request\n");
++		rc = PTR_ERR(req);
++		goto out_unregister;
++	}
++
++	init_completion(&completion);
++	req->callback_data = &completion;
++
++	/* Add request to sclp queue */
++	rc = sclp_add_request(req);
++	if (rc) {
++		printk(KERN_WARNING "cpi: could not start request\n");
++		goto out_free_req;
++	}
++
++	wait_for_completion(&completion);
++
++	if (req->status != SCLP_REQ_DONE) {
++		printk(KERN_WARNING "cpi: request failed (status=0x%02x)\n",
++			req->status);
++		rc = -EIO;
++		goto out_free_req;
++	}
++
++	response = ((struct cpi_sccb *) req->sccb)->header.response_code;
++	if (response != 0x0020) {
++		printk(KERN_WARNING "cpi: failed with "
++			"response code 0x%x\n", response);
++		rc = -EIO;
++	}
++
++out_free_req:
++	cpi_free_req(req);
++
++out_unregister:
++	sclp_unregister(&sclp_cpi_event);
++
++out:
++	return rc;
++}
++
++static int check_string(const char *attr, const char *str)
++{
++	size_t len;
++	size_t i;
++
++	len = strlen(str);
++
++	if ((len > 0) && (str[len - 1] == '\n'))
++		len--;
++
++	if (len > CPI_LENGTH_NAME)
++		return -EINVAL;
++
++	for (i = 0; i < len ; i++) {
++		if (isalpha(str[i]) || isdigit(str[i]) ||
++		    strchr("$@# ", str[i]))
++			continue;
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++static void set_string(char *attr, const char *value)
++{
++	size_t len;
++	size_t i;
++
++	len = strlen(value);
++
++	if ((len > 0) && (value[len - 1] == '\n'))
++		len--;
++
++	for (i = 0; i < CPI_LENGTH_NAME; i++) {
++		if (i < len)
++			attr[i] = toupper(value[i]);
++		else
++			attr[i] = ' ';
++	}
++}
++
++static ssize_t system_name_show(struct kobject *kobj,
++				struct kobj_attribute *attr, char *page)
++{
++	return snprintf(page, PAGE_SIZE, "%s\n", system_name);
++}
++
++static ssize_t system_name_store(struct kobject *kobj,
++				 struct kobj_attribute *attr,
++				 const char *buf,
++	size_t len)
++{
++	int rc;
++
++	rc = check_string("system_name", buf);
++	if (rc)
++		return rc;
++
++	set_string(system_name, buf);
++
++	return len;
++}
++
++static struct kobj_attribute system_name_attr =
++	__ATTR(system_name, 0644, system_name_show, system_name_store);
++
++static ssize_t sysplex_name_show(struct kobject *kobj,
++				 struct kobj_attribute *attr, char *page)
++{
++	return snprintf(page, PAGE_SIZE, "%s\n", sysplex_name);
++}
++
++static ssize_t sysplex_name_store(struct kobject *kobj,
++				  struct kobj_attribute *attr,
++				  const char *buf,
++	size_t len)
++{
++	int rc;
++
++	rc = check_string("sysplex_name", buf);
++	if (rc)
++		return rc;
++
++	set_string(sysplex_name, buf);
++
++	return len;
++}
++
++static struct kobj_attribute sysplex_name_attr =
++	__ATTR(sysplex_name, 0644, sysplex_name_show, sysplex_name_store);
++
++static ssize_t system_type_show(struct kobject *kobj,
++				struct kobj_attribute *attr, char *page)
++{
++	return snprintf(page, PAGE_SIZE, "%s\n", system_type);
++}
++
++static ssize_t system_type_store(struct kobject *kobj,
++				 struct kobj_attribute *attr,
++				 const char *buf,
++	size_t len)
++{
++	int rc;
++
++	rc = check_string("system_type", buf);
++	if (rc)
++		return rc;
++
++	set_string(system_type, buf);
++
++	return len;
++}
++
++static struct kobj_attribute system_type_attr =
++	__ATTR(system_type, 0644, system_type_show, system_type_store);
++
++static ssize_t system_level_show(struct kobject *kobj,
++				 struct kobj_attribute *attr, char *page)
++{
++	unsigned long long level = system_level;
++
++	return snprintf(page, PAGE_SIZE, "%#018llx\n", level);
++}
++
++static ssize_t system_level_store(struct kobject *kobj,
++				  struct kobj_attribute *attr,
++				  const char *buf,
++	size_t len)
++{
++	unsigned long long level;
++	char *endp;
++
++	level = simple_strtoull(buf, &endp, 16);
++
++	if (endp == buf)
++		return -EINVAL;
++	if (*endp == '\n')
++		endp++;
++	if (*endp)
++		return -EINVAL;
++
++	system_level = level;
++
++	return len;
++}
++
++static struct kobj_attribute system_level_attr =
++	__ATTR(system_level, 0644, system_level_show, system_level_store);
++
++static ssize_t set_store(struct kobject *kobj,
++			 struct kobj_attribute *attr,
++			 const char *buf, size_t len)
++{
++	int rc;
++
++	rc = cpi_req();
++	if (rc)
++		return rc;
++
++	return len;
++}
++
++static struct kobj_attribute set_attr = __ATTR(set, 0200, NULL, set_store);
++
++static struct attribute *cpi_attrs[] = {
++	&system_name_attr.attr,
++	&sysplex_name_attr.attr,
++	&system_type_attr.attr,
++	&system_level_attr.attr,
++	&set_attr.attr,
++	NULL,
++};
++
++static struct attribute_group cpi_attr_group = {
++	.attrs = cpi_attrs,
++};
++
++static struct kset *cpi_kset;
++
++int sclp_cpi_set_data(const char *system, const char *sysplex, const char *type,
++		      const u64 level)
++{
++	int rc;
++
++	rc = check_string("system_name", system);
++	if (rc)
++		return rc;
++	rc = check_string("sysplex_name", sysplex);
++	if (rc)
++		return rc;
++	rc = check_string("system_type", type);
++	if (rc)
++		return rc;
++
++	set_string(system_name, system);
++	set_string(sysplex_name, sysplex);
++	set_string(system_type, type);
++	system_level = level;
++
++	return cpi_req();
++}
++EXPORT_SYMBOL(sclp_cpi_set_data);
++
++static int __init cpi_init(void)
++{
++	int rc;
++
++	cpi_kset = kset_create_and_add("cpi", NULL, firmware_kobj);
++	if (!cpi_kset)
++		return -ENOMEM;
++
++	rc = sysfs_create_group(&cpi_kset->kobj, &cpi_attr_group);
++	if (rc)
++		kset_unregister(cpi_kset);
++
++	return rc;
++}
++
++__initcall(cpi_init);
+diff --git a/drivers/s390/char/sclp_cpi_sys.h b/drivers/s390/char/sclp_cpi_sys.h
+new file mode 100644
+index 0000000..deef3e6
+--- /dev/null
++++ b/drivers/s390/char/sclp_cpi_sys.h
+@@ -0,0 +1,15 @@
++/*
++ *  drivers/s390/char/sclp_cpi_sys.h
++ *    SCLP control program identification sysfs interface
++ *
++ *    Copyright IBM Corp. 2007
++ *    Author(s): Michael Ernst <mernst at de.ibm.com>
++ */
++
++#ifndef __SCLP_CPI_SYS_H__
++#define __SCLP_CPI_SYS_H__
++
++int sclp_cpi_set_data(const char *system, const char *sysplex,
++		      const char *type, u64 level);
++
++#endif	 /* __SCLP_CPI_SYS_H__ */
+diff --git a/drivers/s390/char/sclp_info.c b/drivers/s390/char/sclp_info.c
+deleted file mode 100644
+index a1136e0..0000000
+--- a/drivers/s390/char/sclp_info.c
++++ /dev/null
+@@ -1,116 +0,0 @@
+-/*
+- *  drivers/s390/char/sclp_info.c
+- *
+- *    Copyright IBM Corp. 2007
+- *    Author(s): Heiko Carstens <heiko.carstens at de.ibm.com>
+- */
+-
+-#include <linux/init.h>
+-#include <linux/errno.h>
+-#include <linux/string.h>
+-#include <asm/sclp.h>
+-#include "sclp.h"
+-
+-struct sclp_readinfo_sccb {
+-	struct	sccb_header header;	/* 0-7 */
+-	u16	rnmax;			/* 8-9 */
+-	u8	rnsize;			/* 10 */
+-	u8	_reserved0[24 - 11];	/* 11-23 */
+-	u8	loadparm[8];		/* 24-31 */
+-	u8	_reserved1[48 - 32];	/* 32-47 */
+-	u64	facilities;		/* 48-55 */
+-	u8	_reserved2[91 - 56];	/* 56-90 */
+-	u8	flags;			/* 91 */
+-	u8	_reserved3[100 - 92];	/* 92-99 */
+-	u32	rnsize2;		/* 100-103 */
+-	u64	rnmax2;			/* 104-111 */
+-	u8	_reserved4[4096 - 112];	/* 112-4095 */
+-} __attribute__((packed, aligned(4096)));
+-
+-static struct sclp_readinfo_sccb __initdata early_readinfo_sccb;
+-static int __initdata early_readinfo_sccb_valid;
+-
+-u64 sclp_facilities;
+-
+-void __init sclp_readinfo_early(void)
+-{
+-	int ret;
+-	int i;
+-	struct sclp_readinfo_sccb *sccb;
+-	sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
+-				  SCLP_CMDW_READ_SCP_INFO};
+-
+-	/* Enable service signal subclass mask. */
+-	__ctl_set_bit(0, 9);
+-	sccb = &early_readinfo_sccb;
+-	for (i = 0; i < ARRAY_SIZE(commands); i++) {
+-		do {
+-			memset(sccb, 0, sizeof(*sccb));
+-			sccb->header.length = sizeof(*sccb);
+-			sccb->header.control_mask[2] = 0x80;
+-			ret = sclp_service_call(commands[i], sccb);
+-		} while (ret == -EBUSY);
+-
+-		if (ret)
+-			break;
+-		__load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT |
+-				PSW_MASK_WAIT | PSW_DEFAULT_KEY);
+-		local_irq_disable();
+-		/*
+-		 * Contents of the sccb might have changed
+-		 * therefore a barrier is needed.
+-		 */
+-		barrier();
+-		if (sccb->header.response_code == 0x10) {
+-			early_readinfo_sccb_valid = 1;
+-			break;
+-		}
+-		if (sccb->header.response_code != 0x1f0)
+-			break;
+-	}
+-	/* Disable service signal subclass mask again. */
+-	__ctl_clear_bit(0, 9);
+-}
+-
+-void __init sclp_facilities_detect(void)
+-{
+-	if (!early_readinfo_sccb_valid)
+-		return;
+-	sclp_facilities = early_readinfo_sccb.facilities;
+-}
+-
+-unsigned long long __init sclp_memory_detect(void)
+-{
+-	unsigned long long memsize;
+-	struct sclp_readinfo_sccb *sccb;
+-
+-	if (!early_readinfo_sccb_valid)
+-		return 0;
+-	sccb = &early_readinfo_sccb;
+-	if (sccb->rnsize)
+-		memsize = sccb->rnsize << 20;
+-	else
+-		memsize = sccb->rnsize2 << 20;
+-	if (sccb->rnmax)
+-		memsize *= sccb->rnmax;
+-	else
+-		memsize *= sccb->rnmax2;
+-	return memsize;
+-}
+-
+-/*
+- * This function will be called after sclp_memory_detect(), which gets called
+- * early from early.c code. Therefore the sccb should have valid contents.
+- */
+-void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
+-{
+-	struct sclp_readinfo_sccb *sccb;
+-
+-	if (!early_readinfo_sccb_valid)
+-		return;
+-	sccb = &early_readinfo_sccb;
+-	info->is_valid = 1;
+-	if (sccb->flags & 0x2)
+-		info->has_dump = 1;
+-	memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
+-}
+diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
+index d6b06ab..ad7195d 100644
+--- a/drivers/s390/char/sclp_rw.c
++++ b/drivers/s390/char/sclp_rw.c
+@@ -76,7 +76,7 @@ sclp_make_buffer(void *page, unsigned short columns, unsigned short htab)
+ }
+ 
+ /*
+- * Return a pointer to the orignal page that has been used to create
++ * Return a pointer to the original page that has been used to create
+  * the buffer.
+  */
+ void *
+diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
+index da25f8e..8246ef3 100644
+--- a/drivers/s390/char/tape_3590.c
++++ b/drivers/s390/char/tape_3590.c
+@@ -1495,7 +1495,7 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
+ 			   device->cdev->dev.bus_id);
+ 		return tape_3590_erp_basic(device, request, irb, -EPERM);
+ 	case 0x8013:
+-		PRINT_WARN("(%s): Another host has priviliged access to the "
++		PRINT_WARN("(%s): Another host has privileged access to the "
+ 			   "tape device\n", device->cdev->dev.bus_id);
+ 		PRINT_WARN("(%s): To solve the problem unload the current "
+ 			   "cartridge!\n", device->cdev->dev.bus_id);
+diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
+index 2fae633..7ad8cf1 100644
+--- a/drivers/s390/char/tape_core.c
++++ b/drivers/s390/char/tape_core.c
+@@ -37,7 +37,7 @@ static void tape_long_busy_timeout(unsigned long data);
+  * we can assign the devices to minor numbers of the same major
+  * The list is protected by the rwlock
+  */
+-static struct list_head tape_device_list = LIST_HEAD_INIT(tape_device_list);
++static LIST_HEAD(tape_device_list);
+ static DEFINE_RWLOCK(tape_device_lock);
+ 
+ /*
+diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c
+index cea49f0..c9b96d5 100644
+--- a/drivers/s390/char/tape_proc.c
++++ b/drivers/s390/char/tape_proc.c
+@@ -97,7 +97,7 @@ static void tape_proc_stop(struct seq_file *m, void *v)
+ {
+ }
+ 
+-static struct seq_operations tape_proc_seq = {
++static const struct seq_operations tape_proc_seq = {
+ 	.start		= tape_proc_start,
+ 	.next		= tape_proc_next,
+ 	.stop		= tape_proc_stop,
+diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
+index e0c4c50..d364e0b 100644
+--- a/drivers/s390/char/vmlogrdr.c
++++ b/drivers/s390/char/vmlogrdr.c
+@@ -683,7 +683,7 @@ static int vmlogrdr_register_driver(void)
+ 	/* Register with iucv driver */
+ 	ret = iucv_register(&vmlogrdr_iucv_handler, 1);
+ 	if (ret) {
+-		printk (KERN_ERR "vmlogrdr: failed to register with"
++		printk (KERN_ERR "vmlogrdr: failed to register with "
+ 			"iucv driver\n");
+ 		goto out;
+ 	}
+diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
+index d70a6e6..7689b50 100644
+--- a/drivers/s390/char/vmur.c
++++ b/drivers/s390/char/vmur.c
+@@ -759,7 +759,7 @@ static loff_t ur_llseek(struct file *file, loff_t offset, int whence)
+ 	return newpos;
+ }
+ 
+-static struct file_operations ur_fops = {
++static const struct file_operations ur_fops = {
+ 	.owner	 = THIS_MODULE,
+ 	.open	 = ur_open,
+ 	.release = ur_release,
+diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
+index 7073daf..f523501 100644
+--- a/drivers/s390/char/zcore.c
++++ b/drivers/s390/char/zcore.c
+@@ -470,7 +470,7 @@ static loff_t zcore_lseek(struct file *file, loff_t offset, int orig)
+ 	return rc;
+ }
+ 
+-static struct file_operations zcore_fops = {
++static const struct file_operations zcore_fops = {
+ 	.owner		= THIS_MODULE,
+ 	.llseek		= zcore_lseek,
+ 	.read		= zcore_read,
+diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
+index 5287631..b7a07a8 100644
+--- a/drivers/s390/cio/airq.c
++++ b/drivers/s390/cio/airq.c
+@@ -1,12 +1,12 @@
+ /*
+  *  drivers/s390/cio/airq.c
+- *   S/390 common I/O routines -- support for adapter interruptions
++ *    Support for adapter interruptions
+  *
+- *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
+- *			      IBM Corporation
+- *    Author(s): Ingo Adlung (adlung at de.ibm.com)
+- *		 Cornelia Huck (cornelia.huck at de.ibm.com)
+- *		 Arnd Bergmann (arndb at de.ibm.com)
++ *    Copyright IBM Corp. 1999,2007
++ *    Author(s): Ingo Adlung <adlung at de.ibm.com>
++ *		 Cornelia Huck <cornelia.huck at de.ibm.com>
++ *		 Arnd Bergmann <arndb at de.ibm.com>
++ *		 Peter Oberparleiter <peter.oberparleiter at de.ibm.com>
+  */
+ 
+ #include <linux/init.h>
+@@ -14,72 +14,131 @@
+ #include <linux/slab.h>
+ #include <linux/rcupdate.h>
+ 
++#include <asm/airq.h>
++
++#include "cio.h"
+ #include "cio_debug.h"
+-#include "airq.h"
+ 
+-static adapter_int_handler_t adapter_handler;
++#define NR_AIRQS		32
++#define NR_AIRQS_PER_WORD	sizeof(unsigned long)
++#define NR_AIRQ_WORDS		(NR_AIRQS / NR_AIRQS_PER_WORD)
+ 
+-/*
+- * register for adapter interrupts
+- *
+- * With HiperSockets the zSeries architecture provides for
+- *  means of adapter interrups, pseudo I/O interrupts that are
+- *  not tied to an I/O subchannel, but to an adapter. However,
+- *  it doesn't disclose the info how to enable/disable them, but
+- *  to recognize them only. Perhaps we should consider them
+- *  being shared interrupts, and thus build a linked list
+- *  of adapter handlers ... to be evaluated ...
+- */
+-int
+-s390_register_adapter_interrupt (adapter_int_handler_t handler)
+-{
+-	int ret;
+-	char dbf_txt[15];
++union indicator_t {
++	unsigned long word[NR_AIRQ_WORDS];
++	unsigned char byte[NR_AIRQS];
++} __attribute__((packed));
+ 
+-	CIO_TRACE_EVENT (4, "rgaint");
++struct airq_t {
++	adapter_int_handler_t handler;
++	void *drv_data;
++};
+ 
+-	if (handler == NULL)
+-		ret = -EINVAL;
+-	else
+-		ret = (cmpxchg(&adapter_handler, NULL, handler) ? -EBUSY : 0);
+-	if (!ret)
+-		synchronize_sched();  /* Allow interrupts to complete. */
++static union indicator_t indicators;
++static struct airq_t *airqs[NR_AIRQS];
+ 
+-	sprintf (dbf_txt, "ret:%d", ret);
+-	CIO_TRACE_EVENT (4, dbf_txt);
++static int register_airq(struct airq_t *airq)
++{
++	int i;
+ 
+-	return ret;
++	for (i = 0; i < NR_AIRQS; i++)
++		if (!cmpxchg(&airqs[i], NULL, airq))
++			return i;
++	return -ENOMEM;
+ }
+ 
+-int
+-s390_unregister_adapter_interrupt (adapter_int_handler_t handler)
++/**
++ * s390_register_adapter_interrupt() - register adapter interrupt handler
++ * @handler: adapter handler to be registered
++ * @drv_data: driver data passed with each call to the handler
++ *
++ * Returns:
++ *  Pointer to the indicator to be used on success
++ *  ERR_PTR() if registration failed
++ */
++void *s390_register_adapter_interrupt(adapter_int_handler_t handler,
++				      void *drv_data)
+ {
++	struct airq_t *airq;
++	char dbf_txt[16];
+ 	int ret;
+-	char dbf_txt[15];
+ 
+-	CIO_TRACE_EVENT (4, "urgaint");
+-
+-	if (handler == NULL)
+-		ret = -EINVAL;
+-	else {
+-		adapter_handler = NULL;
+-		synchronize_sched();  /* Allow interrupts to complete. */
+-		ret = 0;
++	airq = kmalloc(sizeof(struct airq_t), GFP_KERNEL);
++	if (!airq) {
++		ret = -ENOMEM;
++		goto out;
+ 	}
+-	sprintf (dbf_txt, "ret:%d", ret);
+-	CIO_TRACE_EVENT (4, dbf_txt);
+-
+-	return ret;
++	airq->handler = handler;
++	airq->drv_data = drv_data;
++	ret = register_airq(airq);
++	if (ret < 0)
++		kfree(airq);
++out:
++	snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%d", ret);
++	CIO_TRACE_EVENT(4, dbf_txt);
++	if (ret < 0)
++		return ERR_PTR(ret);
++	else
++		return &indicators.byte[ret];
+ }
++EXPORT_SYMBOL(s390_register_adapter_interrupt);
+ 
+-void
+-do_adapter_IO (void)
++/**
++ * s390_unregister_adapter_interrupt - unregister adapter interrupt handler
++ * @ind: indicator for which the handler is to be unregistered
++ */
++void s390_unregister_adapter_interrupt(void *ind)
+ {
+-	CIO_TRACE_EVENT (6, "doaio");
++	struct airq_t *airq;
++	char dbf_txt[16];
++	int i;
+ 
+-	if (adapter_handler)
+-		(*adapter_handler) ();
++	i = (int) ((addr_t) ind) - ((addr_t) &indicators.byte[0]);
++	snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%d", i);
++	CIO_TRACE_EVENT(4, dbf_txt);
++	indicators.byte[i] = 0;
++	airq = xchg(&airqs[i], NULL);
++	/*
++	 * Allow interrupts to complete. This will ensure that the airq handle
++	 * is no longer referenced by any interrupt handler.
++	 */
++	synchronize_sched();
++	kfree(airq);
+ }
++EXPORT_SYMBOL(s390_unregister_adapter_interrupt);
++
++#define INDICATOR_MASK	(0xffUL << ((NR_AIRQS_PER_WORD - 1) * 8))
+ 
+-EXPORT_SYMBOL (s390_register_adapter_interrupt);
+-EXPORT_SYMBOL (s390_unregister_adapter_interrupt);
++void do_adapter_IO(void)
++{
++	int w;
++	int i;
++	unsigned long word;
++	struct airq_t *airq;
++
++	/*
++	 * Access indicator array in word-sized chunks to minimize storage
++	 * fetch operations.
++	 */
++	for (w = 0; w < NR_AIRQ_WORDS; w++) {
++		word = indicators.word[w];
++		i = w * NR_AIRQS_PER_WORD;
++		/*
++		 * Check bytes within word for active indicators.
++		 */
++		while (word) {
++			if (word & INDICATOR_MASK) {
++				airq = airqs[i];
++				if (likely(airq))
++					airq->handler(&indicators.byte[i],
++						      airq->drv_data);
++				else
++					/*
++					 * Reset ill-behaved indicator.
++					 */
++					indicators.byte[i] = 0;
++			}
++			word <<= 8;
++			i++;
++		}
++	}
++}
+diff --git a/drivers/s390/cio/airq.h b/drivers/s390/cio/airq.h
+deleted file mode 100644
+index 7d6be3f..0000000
+--- a/drivers/s390/cio/airq.h
++++ /dev/null
+@@ -1,10 +0,0 @@
+-#ifndef S390_AINTERRUPT_H
+-#define S390_AINTERRUPT_H
+-
+-typedef	int (*adapter_int_handler_t)(void);
+-
+-extern int s390_register_adapter_interrupt(adapter_int_handler_t handler);
+-extern int s390_unregister_adapter_interrupt(adapter_int_handler_t handler);
+-extern void do_adapter_IO (void);
+-
+-#endif
+diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
+index bd5f16f..e8597ec 100644
+--- a/drivers/s390/cio/blacklist.c
++++ b/drivers/s390/cio/blacklist.c
+@@ -348,7 +348,7 @@ cio_ignore_write(struct file *file, const char __user *user_buf,
+ 	return user_len;
+ }
+ 
+-static struct seq_operations cio_ignore_proc_seq_ops = {
++static const struct seq_operations cio_ignore_proc_seq_ops = {
+ 	.start = cio_ignore_proc_seq_start,
+ 	.stop  = cio_ignore_proc_seq_stop,
+ 	.next  = cio_ignore_proc_seq_next,
+diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
+index 5baa517..3964056 100644
+--- a/drivers/s390/cio/ccwgroup.c
++++ b/drivers/s390/cio/ccwgroup.c
+@@ -35,8 +35,8 @@ ccwgroup_bus_match (struct device * dev, struct device_driver * drv)
+ 	struct ccwgroup_device *gdev;
+ 	struct ccwgroup_driver *gdrv;
+ 
+-	gdev = container_of(dev, struct ccwgroup_device, dev);
+-	gdrv = container_of(drv, struct ccwgroup_driver, driver);
++	gdev = to_ccwgroupdev(dev);
++	gdrv = to_ccwgroupdrv(drv);
+ 
+ 	if (gdev->creator_id == gdrv->driver_id)
+ 		return 1;
+@@ -75,8 +75,10 @@ static void ccwgroup_ungroup_callback(struct device *dev)
+ 	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
+ 
+ 	mutex_lock(&gdev->reg_mutex);
+-	__ccwgroup_remove_symlinks(gdev);
+-	device_unregister(dev);
++	if (device_is_registered(&gdev->dev)) {
++		__ccwgroup_remove_symlinks(gdev);
++		device_unregister(dev);
++	}
+ 	mutex_unlock(&gdev->reg_mutex);
+ }
+ 
+@@ -111,7 +113,7 @@ ccwgroup_release (struct device *dev)
+ 	gdev = to_ccwgroupdev(dev);
+ 
+ 	for (i = 0; i < gdev->count; i++) {
+-		gdev->cdev[i]->dev.driver_data = NULL;
++		dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
+ 		put_device(&gdev->cdev[i]->dev);
+ 	}
+ 	kfree(gdev);
+@@ -196,11 +198,11 @@ int ccwgroup_create(struct device *root, unsigned int creator_id,
+ 			goto error;
+ 		}
+ 		/* Don't allow a device to belong to more than one group. */
+-		if (gdev->cdev[i]->dev.driver_data) {
++		if (dev_get_drvdata(&gdev->cdev[i]->dev)) {
+ 			rc = -EINVAL;
+ 			goto error;
+ 		}
+-		gdev->cdev[i]->dev.driver_data = gdev;
++		dev_set_drvdata(&gdev->cdev[i]->dev, gdev);
+ 	}
+ 
+ 	gdev->creator_id = creator_id;
+@@ -234,8 +236,8 @@ int ccwgroup_create(struct device *root, unsigned int creator_id,
+ error:
+ 	for (i = 0; i < argc; i++)
+ 		if (gdev->cdev[i]) {
+-			if (gdev->cdev[i]->dev.driver_data == gdev)
+-				gdev->cdev[i]->dev.driver_data = NULL;
++			if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
++				dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
+ 			put_device(&gdev->cdev[i]->dev);
+ 		}
+ 	mutex_unlock(&gdev->reg_mutex);
+@@ -408,6 +410,7 @@ int ccwgroup_driver_register(struct ccwgroup_driver *cdriver)
+ 	/* register our new driver with the core */
+ 	cdriver->driver.bus = &ccwgroup_bus_type;
+ 	cdriver->driver.name = cdriver->name;
++	cdriver->driver.owner = cdriver->owner;
+ 
+ 	return driver_register(&cdriver->driver);
+ }
+@@ -463,8 +466,8 @@ __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
+ {
+ 	struct ccwgroup_device *gdev;
+ 
+-	if (cdev->dev.driver_data) {
+-		gdev = (struct ccwgroup_device *)cdev->dev.driver_data;
++	gdev = dev_get_drvdata(&cdev->dev);
++	if (gdev) {
+ 		if (get_device(&gdev->dev)) {
+ 			mutex_lock(&gdev->reg_mutex);
+ 			if (device_is_registered(&gdev->dev))
+diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
+index 597c0c7..e7ba16a 100644
+--- a/drivers/s390/cio/chsc.c
++++ b/drivers/s390/cio/chsc.c
+@@ -89,7 +89,8 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
+ 	/* Copy data */
+ 	ret = 0;
+ 	memset(ssd, 0, sizeof(struct chsc_ssd_info));
+-	if ((ssd_area->st != 0) && (ssd_area->st != 2))
++	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
++	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
+ 		goto out_free;
+ 	ssd->path_mask = ssd_area->path_mask;
+ 	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
+@@ -132,20 +133,16 @@ static void terminate_internal_io(struct subchannel *sch)
+ 	device_set_intretry(sch);
+ 	/* Call handler. */
+ 	if (sch->driver && sch->driver->termination)
+-		sch->driver->termination(&sch->dev);
++		sch->driver->termination(sch);
+ }
+ 
+-static int
+-s390_subchannel_remove_chpid(struct device *dev, void *data)
++static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
+ {
+ 	int j;
+ 	int mask;
+-	struct subchannel *sch;
+-	struct chp_id *chpid;
++	struct chp_id *chpid = data;
+ 	struct schib schib;
+ 
+-	sch = to_subchannel(dev);
+-	chpid = data;
+ 	for (j = 0; j < 8; j++) {
+ 		mask = 0x80 >> j;
+ 		if ((sch->schib.pmcw.pim & mask) &&
+@@ -158,7 +155,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
+ 	spin_lock_irq(sch->lock);
+ 
+ 	stsch(sch->schid, &schib);
+-	if (!schib.pmcw.dnv)
++	if (!css_sch_is_valid(&schib))
+ 		goto out_unreg;
+ 	memcpy(&sch->schib, &schib, sizeof(struct schib));
+ 	/* Check for single path devices. */
+@@ -172,12 +169,12 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
+ 			terminate_internal_io(sch);
+ 			/* Re-start path verification. */
+ 			if (sch->driver && sch->driver->verify)
+-				sch->driver->verify(&sch->dev);
++				sch->driver->verify(sch);
+ 		}
+ 	} else {
+ 		/* trigger path verification. */
+ 		if (sch->driver && sch->driver->verify)
+-			sch->driver->verify(&sch->dev);
++			sch->driver->verify(sch);
+ 		else if (sch->lpm == mask)
+ 			goto out_unreg;
+ 	}
+@@ -201,12 +198,10 @@ void chsc_chp_offline(struct chp_id chpid)
+ 
+ 	if (chp_get_status(chpid) <= 0)
+ 		return;
+-	bus_for_each_dev(&css_bus_type, NULL, &chpid,
+-			 s390_subchannel_remove_chpid);
++	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid);
+ }
+ 
+-static int
+-s390_process_res_acc_new_sch(struct subchannel_id schid)
++static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
+ {
+ 	struct schib schib;
+ 	/*
+@@ -252,18 +247,10 @@ static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
+ 	return 0;
+ }
+ 
+-static int
+-__s390_process_res_acc(struct subchannel_id schid, void *data)
++static int __s390_process_res_acc(struct subchannel *sch, void *data)
+ {
+ 	int chp_mask, old_lpm;
+-	struct res_acc_data *res_data;
+-	struct subchannel *sch;
+-
+-	res_data = data;
+-	sch = get_subchannel_by_schid(schid);
+-	if (!sch)
+-		/* Check if a subchannel is newly available. */
+-		return s390_process_res_acc_new_sch(schid);
++	struct res_acc_data *res_data = data;
+ 
+ 	spin_lock_irq(sch->lock);
+ 	chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
+@@ -279,10 +266,10 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
+ 	if (!old_lpm && sch->lpm)
+ 		device_trigger_reprobe(sch);
+ 	else if (sch->driver && sch->driver->verify)
+-		sch->driver->verify(&sch->dev);
++		sch->driver->verify(sch);
+ out:
+ 	spin_unlock_irq(sch->lock);
+-	put_device(&sch->dev);
++
+ 	return 0;
+ }
+ 
+@@ -305,7 +292,8 @@ static void s390_process_res_acc (struct res_acc_data *res_data)
+ 	 * The more information we have (info), the less scanning
+ 	 * will we have to do.
+ 	 */
+-	for_each_subchannel(__s390_process_res_acc, res_data);
++	for_each_subchannel_staged(__s390_process_res_acc,
++				   s390_process_res_acc_new_sch, res_data);
+ }
+ 
+ static int
+@@ -499,8 +487,7 @@ void chsc_process_crw(void)
+ 	} while (sei_area->flags & 0x80);
+ }
+ 
+-static int
+-__chp_add_new_sch(struct subchannel_id schid)
++static int __chp_add_new_sch(struct subchannel_id schid, void *data)
+ {
+ 	struct schib schib;
+ 
+@@ -514,45 +501,37 @@ __chp_add_new_sch(struct subchannel_id schid)
+ }
+ 
+ 
+-static int
+-__chp_add(struct subchannel_id schid, void *data)
++static int __chp_add(struct subchannel *sch, void *data)
+ {
+ 	int i, mask;
+-	struct chp_id *chpid;
+-	struct subchannel *sch;
+-
+-	chpid = data;
+-	sch = get_subchannel_by_schid(schid);
+-	if (!sch)
+-		/* Check if the subchannel is now available. */
+-		return __chp_add_new_sch(schid);
++	struct chp_id *chpid = data;
++
+ 	spin_lock_irq(sch->lock);
+ 	for (i=0; i<8; i++) {
+ 		mask = 0x80 >> i;
+ 		if ((sch->schib.pmcw.pim & mask) &&
+-		    (sch->schib.pmcw.chpid[i] == chpid->id)) {
+-			if (stsch(sch->schid, &sch->schib) != 0) {
+-				/* Endgame. */
+-				spin_unlock_irq(sch->lock);
+-				return -ENXIO;
+-			}
++		    (sch->schib.pmcw.chpid[i] == chpid->id))
+ 			break;
+-		}
+ 	}
+ 	if (i==8) {
+ 		spin_unlock_irq(sch->lock);
+ 		return 0;
+ 	}
++	if (stsch(sch->schid, &sch->schib)) {
++		spin_unlock_irq(sch->lock);
++		css_schedule_eval(sch->schid);
++		return 0;
++	}
+ 	sch->lpm = ((sch->schib.pmcw.pim &
+ 		     sch->schib.pmcw.pam &
+ 		     sch->schib.pmcw.pom)
+ 		    | mask) & sch->opm;
+ 
+ 	if (sch->driver && sch->driver->verify)
+-		sch->driver->verify(&sch->dev);
++		sch->driver->verify(sch);
+ 
+ 	spin_unlock_irq(sch->lock);
+-	put_device(&sch->dev);
++
+ 	return 0;
+ }
+ 
+@@ -564,7 +543,8 @@ void chsc_chp_online(struct chp_id chpid)
+ 	CIO_TRACE_EVENT(2, dbf_txt);
+ 
+ 	if (chp_get_status(chpid) != 0)
+-		for_each_subchannel(__chp_add, &chpid);
++		for_each_subchannel_staged(__chp_add, __chp_add_new_sch,
++					   &chpid);
+ }
+ 
+ static void __s390_subchannel_vary_chpid(struct subchannel *sch,
+@@ -589,7 +569,7 @@ static void __s390_subchannel_vary_chpid(struct subchannel *sch,
+ 			if (!old_lpm)
+ 				device_trigger_reprobe(sch);
+ 			else if (sch->driver && sch->driver->verify)
+-				sch->driver->verify(&sch->dev);
++				sch->driver->verify(sch);
+ 			break;
+ 		}
+ 		sch->opm &= ~mask;
+@@ -603,37 +583,29 @@ static void __s390_subchannel_vary_chpid(struct subchannel *sch,
+ 				terminate_internal_io(sch);
+ 				/* Re-start path verification. */
+ 				if (sch->driver && sch->driver->verify)
+-					sch->driver->verify(&sch->dev);
++					sch->driver->verify(sch);
+ 			}
+ 		} else if (!sch->lpm) {
+ 			if (device_trigger_verify(sch) != 0)
+ 				css_schedule_eval(sch->schid);
+ 		} else if (sch->driver && sch->driver->verify)
+-			sch->driver->verify(&sch->dev);
++			sch->driver->verify(sch);
+ 		break;
+ 	}
+ 	spin_unlock_irqrestore(sch->lock, flags);
+ }
+ 
+-static int s390_subchannel_vary_chpid_off(struct device *dev, void *data)
++static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
+ {
+-	struct subchannel *sch;
+-	struct chp_id *chpid;
+-
+-	sch = to_subchannel(dev);
+-	chpid = data;
++	struct chp_id *chpid = data;
+ 
+ 	__s390_subchannel_vary_chpid(sch, *chpid, 0);
+ 	return 0;
+ }
+ 
+-static int s390_subchannel_vary_chpid_on(struct device *dev, void *data)
++static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
+ {
+-	struct subchannel *sch;
+-	struct chp_id *chpid;
+-
+-	sch = to_subchannel(dev);
+-	chpid = data;
++	struct chp_id *chpid = data;
+ 
+ 	__s390_subchannel_vary_chpid(sch, *chpid, 1);
+ 	return 0;
+@@ -643,13 +615,7 @@ static int
+ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
+ {
+ 	struct schib schib;
+-	struct subchannel *sch;
+ 
+-	sch = get_subchannel_by_schid(schid);
+-	if (sch) {
+-		put_device(&sch->dev);
+-		return 0;
+-	}
+ 	if (stsch_err(schid, &schib))
+ 		/* We're through */
+ 		return -ENXIO;
+@@ -669,12 +635,13 @@ int chsc_chp_vary(struct chp_id chpid, int on)
+ 	 * Redo PathVerification on the devices the chpid connects to
+ 	 */
+ 
+-	bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
+-			 s390_subchannel_vary_chpid_on :
+-			 s390_subchannel_vary_chpid_off);
+ 	if (on)
+-		/* Scan for new devices on varied on path. */
+-		for_each_subchannel(__s390_vary_chpid_on, NULL);
++		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
++					   __s390_vary_chpid_on, &chpid);
++	else
++		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
++					   NULL, &chpid);
++
+ 	return 0;
+ }
+ 
+@@ -1075,7 +1042,7 @@ chsc_determine_css_characteristics(void)
+ 
+ 	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ 	if (!scsc_area) {
+-		CIO_MSG_EVENT(0, "Was not able to determine available"
++		CIO_MSG_EVENT(0, "Was not able to determine available "
+ 			      "CHSCs due to no memory.\n");
+ 		return -ENOMEM;
+ 	}
+diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
+index 4690534..60590a1 100644
+--- a/drivers/s390/cio/cio.c
++++ b/drivers/s390/cio/cio.c
+@@ -23,11 +23,12 @@
+ #include <asm/reset.h>
+ #include <asm/ipl.h>
+ #include <asm/chpid.h>
+-#include "airq.h"
++#include <asm/airq.h>
+ #include "cio.h"
+ #include "css.h"
+ #include "chsc.h"
+ #include "ioasm.h"
++#include "io_sch.h"
+ #include "blacklist.h"
+ #include "cio_debug.h"
+ #include "chp.h"
+@@ -56,39 +57,37 @@ __setup ("cio_msg=", cio_setup);
+ 
+ /*
+  * Function: cio_debug_init
+- * Initializes three debug logs (under /proc/s390dbf) for common I/O:
+- * - cio_msg logs the messages which are printk'ed when CONFIG_DEBUG_IO is on
++ * Initializes three debug logs for common I/O:
++ * - cio_msg logs generic cio messages
+  * - cio_trace logs the calling of different functions
+- * - cio_crw logs the messages which are printk'ed when CONFIG_DEBUG_CRW is on
+- * debug levels depend on CONFIG_DEBUG_IO resp. CONFIG_DEBUG_CRW
++ * - cio_crw logs machine check related cio messages
+  */
+-static int __init
+-cio_debug_init (void)
++static int __init cio_debug_init(void)
+ {
+-	cio_debug_msg_id = debug_register ("cio_msg", 16, 4, 16*sizeof (long));
++	cio_debug_msg_id = debug_register("cio_msg", 16, 1, 16 * sizeof(long));
+ 	if (!cio_debug_msg_id)
+ 		goto out_unregister;
+-	debug_register_view (cio_debug_msg_id, &debug_sprintf_view);
+-	debug_set_level (cio_debug_msg_id, 2);
+-	cio_debug_trace_id = debug_register ("cio_trace", 16, 4, 16);
++	debug_register_view(cio_debug_msg_id, &debug_sprintf_view);
++	debug_set_level(cio_debug_msg_id, 2);
++	cio_debug_trace_id = debug_register("cio_trace", 16, 1, 16);
+ 	if (!cio_debug_trace_id)
+ 		goto out_unregister;
+-	debug_register_view (cio_debug_trace_id, &debug_hex_ascii_view);
+-	debug_set_level (cio_debug_trace_id, 2);
+-	cio_debug_crw_id = debug_register ("cio_crw", 4, 4, 16*sizeof (long));
++	debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view);
++	debug_set_level(cio_debug_trace_id, 2);
++	cio_debug_crw_id = debug_register("cio_crw", 16, 1, 16 * sizeof(long));
+ 	if (!cio_debug_crw_id)
+ 		goto out_unregister;
+-	debug_register_view (cio_debug_crw_id, &debug_sprintf_view);
+-	debug_set_level (cio_debug_crw_id, 2);
++	debug_register_view(cio_debug_crw_id, &debug_sprintf_view);
++	debug_set_level(cio_debug_crw_id, 4);
+ 	return 0;
+ 
+ out_unregister:
+ 	if (cio_debug_msg_id)
+-		debug_unregister (cio_debug_msg_id);
++		debug_unregister(cio_debug_msg_id);
+ 	if (cio_debug_trace_id)
+-		debug_unregister (cio_debug_trace_id);
++		debug_unregister(cio_debug_trace_id);
+ 	if (cio_debug_crw_id)
+-		debug_unregister (cio_debug_crw_id);
++		debug_unregister(cio_debug_crw_id);
+ 	printk(KERN_WARNING"cio: could not initialize debugging\n");
+ 	return -1;
+ }
+@@ -147,7 +146,7 @@ cio_tpi(void)
+ 	spin_lock(sch->lock);
+ 	memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw));
+ 	if (sch->driver && sch->driver->irq)
+-		sch->driver->irq(&sch->dev);
++		sch->driver->irq(sch);
+ 	spin_unlock(sch->lock);
+ 	irq_exit ();
+ 	_local_bh_enable();
+@@ -184,33 +183,35 @@ cio_start_key (struct subchannel *sch,	/* subchannel structure */
+ {
+ 	char dbf_txt[15];
+ 	int ccode;
++	struct orb *orb;
+ 
+-	CIO_TRACE_EVENT (4, "stIO");
+-	CIO_TRACE_EVENT (4, sch->dev.bus_id);
++	CIO_TRACE_EVENT(4, "stIO");
++	CIO_TRACE_EVENT(4, sch->dev.bus_id);
+ 
++	orb = &to_io_private(sch)->orb;
+ 	/* sch is always under 2G. */
+-	sch->orb.intparm = (__u32)(unsigned long)sch;
+-	sch->orb.fmt = 1;
++	orb->intparm = (u32)(addr_t)sch;
++	orb->fmt = 1;
+ 
+-	sch->orb.pfch = sch->options.prefetch == 0;
+-	sch->orb.spnd = sch->options.suspend;
+-	sch->orb.ssic = sch->options.suspend && sch->options.inter;
+-	sch->orb.lpm = (lpm != 0) ? lpm : sch->lpm;
++	orb->pfch = sch->options.prefetch == 0;
++	orb->spnd = sch->options.suspend;
++	orb->ssic = sch->options.suspend && sch->options.inter;
++	orb->lpm = (lpm != 0) ? lpm : sch->lpm;
+ #ifdef CONFIG_64BIT
+ 	/*
+ 	 * for 64 bit we always support 64 bit IDAWs with 4k page size only
+ 	 */
+-	sch->orb.c64 = 1;
+-	sch->orb.i2k = 0;
++	orb->c64 = 1;
++	orb->i2k = 0;
+ #endif
+-	sch->orb.key = key >> 4;
++	orb->key = key >> 4;
+ 	/* issue "Start Subchannel" */
+-	sch->orb.cpa = (__u32) __pa (cpa);
+-	ccode = ssch (sch->schid, &sch->orb);
++	orb->cpa = (__u32) __pa(cpa);
++	ccode = ssch(sch->schid, orb);
+ 
+ 	/* process condition code */
+-	sprintf (dbf_txt, "ccode:%d", ccode);
+-	CIO_TRACE_EVENT (4, dbf_txt);
++	sprintf(dbf_txt, "ccode:%d", ccode);
++	CIO_TRACE_EVENT(4, dbf_txt);
+ 
+ 	switch (ccode) {
+ 	case 0:
+@@ -405,8 +406,8 @@ cio_modify (struct subchannel *sch)
+ /*
+  * Enable subchannel.
+  */
+-int
+-cio_enable_subchannel (struct subchannel *sch, unsigned int isc)
++int cio_enable_subchannel(struct subchannel *sch, unsigned int isc,
++			  u32 intparm)
+ {
+ 	char dbf_txt[15];
+ 	int ccode;
+@@ -425,7 +426,7 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc)
+ 	for (retry = 5, ret = 0; retry > 0; retry--) {
+ 		sch->schib.pmcw.ena = 1;
+ 		sch->schib.pmcw.isc = isc;
+-		sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
++		sch->schib.pmcw.intparm = intparm;
+ 		ret = cio_modify(sch);
+ 		if (ret == -ENODEV)
+ 			break;
+@@ -567,7 +568,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
+ 	 */
+ 	if (sch->st != 0) {
+ 		CIO_DEBUG(KERN_INFO, 0,
+-			  "cio: Subchannel 0.%x.%04x reports "
++			  "Subchannel 0.%x.%04x reports "
+ 			  "non-I/O subchannel type %04X\n",
+ 			  sch->schid.ssid, sch->schid.sch_no, sch->st);
+ 		/* We stop here for non-io subchannels. */
+@@ -576,11 +577,11 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
+ 	}
+ 
+ 	/* Initialization for io subchannels. */
+-	if (!sch->schib.pmcw.dnv) {
+-		/* io subchannel but device number is invalid. */
++	if (!css_sch_is_valid(&sch->schib)) {
+ 		err = -ENODEV;
+ 		goto out;
+ 	}
++
+ 	/* Devno is valid. */
+ 	if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) {
+ 		/*
+@@ -600,7 +601,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
+ 	sch->lpm = sch->schib.pmcw.pam & sch->opm;
+ 
+ 	CIO_DEBUG(KERN_INFO, 0,
+-		  "cio: Detected device %04x on subchannel 0.%x.%04X"
++		  "Detected device %04x on subchannel 0.%x.%04X"
+ 		  " - PIM = %02X, PAM = %02X, POM = %02X\n",
+ 		  sch->schib.pmcw.dev, sch->schid.ssid,
+ 		  sch->schid.sch_no, sch->schib.pmcw.pim,
+@@ -680,7 +681,7 @@ do_IRQ (struct pt_regs *regs)
+ 				sizeof (irb->scsw));
+ 			/* Call interrupt handler if there is one. */
+ 			if (sch->driver && sch->driver->irq)
+-				sch->driver->irq(&sch->dev);
++				sch->driver->irq(sch);
+ 		}
+ 		if (sch)
+ 			spin_unlock(sch->lock);
+@@ -698,8 +699,14 @@ do_IRQ (struct pt_regs *regs)
+ 
+ #ifdef CONFIG_CCW_CONSOLE
+ static struct subchannel console_subchannel;
++static struct io_subchannel_private console_priv;
+ static int console_subchannel_in_use;
+ 
++void *cio_get_console_priv(void)
++{
++	return &console_priv;
++}
++
+ /*
+  * busy wait for the next interrupt on the console
+  */
+@@ -738,9 +745,9 @@ cio_test_for_console(struct subchannel_id schid, void *data)
+ {
+ 	if (stsch_err(schid, &console_subchannel.schib) != 0)
+ 		return -ENXIO;
+-	if (console_subchannel.schib.pmcw.dnv &&
+-	    console_subchannel.schib.pmcw.dev ==
+-	    console_devno) {
++	if ((console_subchannel.schib.pmcw.st == SUBCHANNEL_TYPE_IO) &&
++	    console_subchannel.schib.pmcw.dnv &&
++	    (console_subchannel.schib.pmcw.dev == console_devno)) {
+ 		console_irq = schid.sch_no;
+ 		return 1; /* found */
+ 	}
+@@ -758,6 +765,7 @@ cio_get_console_sch_no(void)
+ 		/* VM provided us with the irq number of the console. */
+ 		schid.sch_no = console_irq;
+ 		if (stsch(schid, &console_subchannel.schib) != 0 ||
++		    (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) ||
+ 		    !console_subchannel.schib.pmcw.dnv)
+ 			return -1;
+ 		console_devno = console_subchannel.schib.pmcw.dev;
+@@ -804,7 +812,7 @@ cio_probe_console(void)
+ 	ctl_set_bit(6, 24);
+ 	console_subchannel.schib.pmcw.isc = 7;
+ 	console_subchannel.schib.pmcw.intparm =
+-		(__u32)(unsigned long)&console_subchannel;
++		(u32)(addr_t)&console_subchannel;
+ 	ret = cio_modify(&console_subchannel);
+ 	if (ret) {
+ 		console_subchannel_in_use = 0;
+@@ -1022,7 +1030,7 @@ static int __reipl_subchannel_match(struct subchannel_id schid, void *data)
+ 
+ 	if (stsch_reset(schid, &schib))
+ 		return -ENXIO;
+-	if (schib.pmcw.dnv &&
++	if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
+ 	    (schib.pmcw.dev == match_id->devid.devno) &&
+ 	    (schid.ssid == match_id->devid.ssid)) {
+ 		match_id->schid = schid;
+@@ -1068,6 +1076,8 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
+ 		return -ENODEV;
+ 	if (stsch(schid, &schib))
+ 		return -ENODEV;
++	if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
++		return -ENODEV;
+ 	if (!schib.pmcw.dnv)
+ 		return -ENODEV;
+ 	iplinfo->devno = schib.pmcw.dev;
+diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
+index 7446c39..52afa4c 100644
+--- a/drivers/s390/cio/cio.h
++++ b/drivers/s390/cio/cio.h
+@@ -11,32 +11,32 @@
+  * path management control word
+  */
+ struct pmcw {
+-	__u32 intparm;		/* interruption parameter */
+-	__u32 qf   : 1;		/* qdio facility */
+-	__u32 res0 : 1;		/* reserved zeros */
+-	__u32 isc  : 3;		/* interruption sublass */
+-	__u32 res5 : 3;		/* reserved zeros */
+-	__u32 ena  : 1;		/* enabled */
+-	__u32 lm   : 2;		/* limit mode */
+-	__u32 mme  : 2;		/* measurement-mode enable */
+-	__u32 mp   : 1;		/* multipath mode */
+-	__u32 tf   : 1;		/* timing facility */
+-	__u32 dnv  : 1;		/* device number valid */
+-	__u32 dev  : 16;	/* device number */
+-	__u8  lpm;		/* logical path mask */
+-	__u8  pnom;		/* path not operational mask */
+-	__u8  lpum;		/* last path used mask */
+-	__u8  pim;		/* path installed mask */
+-	__u16 mbi;		/* measurement-block index */
+-	__u8  pom;		/* path operational mask */
+-	__u8  pam;		/* path available mask */
+-	__u8  chpid[8];		/* CHPID 0-7 (if available) */
+-	__u32 unused1 : 8;	/* reserved zeros */
+-	__u32 st      : 3;	/* subchannel type */
+-	__u32 unused2 : 18;	/* reserved zeros */
+-	__u32 mbfc    : 1;      /* measurement block format control */
+-	__u32 xmwme   : 1;      /* extended measurement word mode enable */
+-	__u32 csense  : 1;	/* concurrent sense; can be enabled ...*/
++	u32 intparm;		/* interruption parameter */
++	u32 qf	 : 1;		/* qdio facility */
++	u32 res0 : 1;		/* reserved zeros */
++	u32 isc  : 3;		/* interruption sublass */
++	u32 res5 : 3;		/* reserved zeros */
++	u32 ena  : 1;		/* enabled */
++	u32 lm	 : 2;		/* limit mode */
++	u32 mme  : 2;		/* measurement-mode enable */
++	u32 mp	 : 1;		/* multipath mode */
++	u32 tf	 : 1;		/* timing facility */
++	u32 dnv  : 1;		/* device number valid */
++	u32 dev  : 16;		/* device number */
++	u8  lpm;		/* logical path mask */
++	u8  pnom;		/* path not operational mask */
++	u8  lpum;		/* last path used mask */
++	u8  pim;		/* path installed mask */
++	u16 mbi;		/* measurement-block index */
++	u8  pom;		/* path operational mask */
++	u8  pam;		/* path available mask */
++	u8  chpid[8];		/* CHPID 0-7 (if available) */
++	u32 unused1 : 8;	/* reserved zeros */
++	u32 st	    : 3;	/* subchannel type */
++	u32 unused2 : 18;	/* reserved zeros */
++	u32 mbfc    : 1;	/* measurement block format control */
++	u32 xmwme   : 1;	/* extended measurement word mode enable */
++	u32 csense  : 1;	/* concurrent sense; can be enabled ...*/
+ 				/*  ... per MSCH, however, if facility */
+ 				/*  ... is not installed, this results */
+ 				/*  ... in an operand exception.       */
+@@ -52,31 +52,6 @@ struct schib {
+ 	__u8 mda[4];		 /* model dependent area */
+ } __attribute__ ((packed,aligned(4)));
+ 
+-/*
+- * operation request block
+- */
+-struct orb {
+-	__u32 intparm;		/* interruption parameter */
+-	__u32 key  : 4; 	/* flags, like key, suspend control, etc. */
+-	__u32 spnd : 1; 	/* suspend control */
+-	__u32 res1 : 1; 	/* reserved */
+-	__u32 mod  : 1; 	/* modification control */
+-	__u32 sync : 1; 	/* synchronize control */
+-	__u32 fmt  : 1; 	/* format control */
+-	__u32 pfch : 1; 	/* prefetch control */
+-	__u32 isic : 1; 	/* initial-status-interruption control */
+-	__u32 alcc : 1; 	/* address-limit-checking control */
+-	__u32 ssic : 1; 	/* suppress-suspended-interr. control */
+-	__u32 res2 : 1; 	/* reserved */
+-	__u32 c64  : 1; 	/* IDAW/QDIO 64 bit control  */
+-	__u32 i2k  : 1; 	/* IDAW 2/4kB block size control */
+-	__u32 lpm  : 8; 	/* logical path mask */
+-	__u32 ils  : 1; 	/* incorrect length */
+-	__u32 zero : 6; 	/* reserved zeros */
+-	__u32 orbx : 1; 	/* ORB extension control */
+-	__u32 cpa;		/* channel program address */
+-}  __attribute__ ((packed,aligned(4)));
+-
+ /* subchannel data structure used by I/O subroutines */
+ struct subchannel {
+ 	struct subchannel_id schid;
+@@ -85,7 +60,7 @@ struct subchannel {
+ 	enum {
+ 		SUBCHANNEL_TYPE_IO = 0,
+ 		SUBCHANNEL_TYPE_CHSC = 1,
+-		SUBCHANNEL_TYPE_MESSAGE = 2,
++		SUBCHANNEL_TYPE_MSG = 2,
+ 		SUBCHANNEL_TYPE_ADM = 3,
+ 	} st;			/* subchannel type */
+ 
+@@ -99,11 +74,10 @@ struct subchannel {
+ 	__u8 lpm;		/* logical path mask */
+ 	__u8 opm;               /* operational path mask */
+ 	struct schib schib;	/* subchannel information block */
+-	struct orb orb;		/* operation request block */
+-	struct ccw1 sense_ccw;	/* static ccw for sense command */
+ 	struct chsc_ssd_info ssd_info;	/* subchannel description */
+ 	struct device dev;	/* entry in device tree */
+ 	struct css_driver *driver;
++	void *private; /* private per subchannel type data */
+ } __attribute__ ((aligned(8)));
+ 
+ #define IO_INTERRUPT_TYPE	   0 /* I/O interrupt type */
+@@ -111,7 +85,7 @@ struct subchannel {
+ #define to_subchannel(n) container_of(n, struct subchannel, dev)
+ 
+ extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id);
+-extern int cio_enable_subchannel (struct subchannel *, unsigned int);
++extern int cio_enable_subchannel(struct subchannel *, unsigned int, u32);
+ extern int cio_disable_subchannel (struct subchannel *);
+ extern int cio_cancel (struct subchannel *);
+ extern int cio_clear (struct subchannel *);
+@@ -125,6 +99,7 @@ extern int cio_get_options (struct subchannel *);
+ extern int cio_modify (struct subchannel *);
+ 
+ int cio_create_sch_lock(struct subchannel *);
++void do_adapter_IO(void);
+ 
+ /* Use with care. */
+ #ifdef CONFIG_CCW_CONSOLE
+@@ -133,10 +108,12 @@ extern void cio_release_console(void);
+ extern int cio_is_console(struct subchannel_id);
+ extern struct subchannel *cio_get_console_subchannel(void);
+ extern spinlock_t * cio_get_console_lock(void);
++extern void *cio_get_console_priv(void);
+ #else
+ #define cio_is_console(schid) 0
+ #define cio_get_console_subchannel() NULL
+-#define cio_get_console_lock() NULL;
++#define cio_get_console_lock() NULL
++#define cio_get_console_priv() NULL
+ #endif
+ 
+ extern int cio_show_msg;
+diff --git a/drivers/s390/cio/cio_debug.h b/drivers/s390/cio/cio_debug.h
+index c9bf898..d7429ef 100644
+--- a/drivers/s390/cio/cio_debug.h
++++ b/drivers/s390/cio/cio_debug.h
+@@ -8,20 +8,19 @@ extern debug_info_t *cio_debug_msg_id;
+ extern debug_info_t *cio_debug_trace_id;
+ extern debug_info_t *cio_debug_crw_id;
+ 
+-#define CIO_TRACE_EVENT(imp, txt) do { \
++#define CIO_TRACE_EVENT(imp, txt) do {				\
+ 		debug_text_event(cio_debug_trace_id, imp, txt); \
+ 	} while (0)
+ 
+-#define CIO_MSG_EVENT(imp, args...) do { \
+-		debug_sprintf_event(cio_debug_msg_id, imp , ##args); \
++#define CIO_MSG_EVENT(imp, args...) do {				\
++		debug_sprintf_event(cio_debug_msg_id, imp , ##args);	\
+ 	} while (0)
+ 
+-#define CIO_CRW_EVENT(imp, args...) do { \
+-		debug_sprintf_event(cio_debug_crw_id, imp , ##args); \
++#define CIO_CRW_EVENT(imp, args...) do {				\
++		debug_sprintf_event(cio_debug_crw_id, imp , ##args);	\
+ 	} while (0)
+ 
+-static inline void
+-CIO_HEX_EVENT(int level, void *data, int length)
++static inline void CIO_HEX_EVENT(int level, void *data, int length)
+ {
+ 	if (unlikely(!cio_debug_trace_id))
+ 		return;
+@@ -32,9 +31,10 @@ CIO_HEX_EVENT(int level, void *data, int length)
+ 	}
+ }
+ 
+-#define CIO_DEBUG(printk_level,event_level,msg...) ({ \
+-	if (cio_show_msg) printk(printk_level msg); \
+-	CIO_MSG_EVENT (event_level, msg); \
+-})
++#define CIO_DEBUG(printk_level, event_level, msg...) do {	\
++		if (cio_show_msg)				\
++			printk(printk_level "cio: " msg);	\
++		CIO_MSG_EVENT(event_level, msg);		\
++	} while (0)
+ 
+ #endif
+diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
+index c3df2cd..3b45bbe 100644
+--- a/drivers/s390/cio/css.c
++++ b/drivers/s390/cio/css.c
+@@ -51,6 +51,62 @@ for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
+ 	return ret;
+ }
+ 
++struct cb_data {
++	void *data;
++	struct idset *set;
++	int (*fn_known_sch)(struct subchannel *, void *);
++	int (*fn_unknown_sch)(struct subchannel_id, void *);
++};
++
++static int call_fn_known_sch(struct device *dev, void *data)
++{
++	struct subchannel *sch = to_subchannel(dev);
++	struct cb_data *cb = data;
++	int rc = 0;
++
++	idset_sch_del(cb->set, sch->schid);
++	if (cb->fn_known_sch)
++		rc = cb->fn_known_sch(sch, cb->data);
++	return rc;
++}
++
++static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
++{
++	struct cb_data *cb = data;
++	int rc = 0;
++
++	if (idset_sch_contains(cb->set, schid))
++		rc = cb->fn_unknown_sch(schid, cb->data);
++	return rc;
++}
++
++int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
++			       int (*fn_unknown)(struct subchannel_id,
++			       void *), void *data)
++{
++	struct cb_data cb;
++	int rc;
++
++	cb.set = idset_sch_new();
++	if (!cb.set)
++		return -ENOMEM;
++	idset_fill(cb.set);
++	cb.data = data;
++	cb.fn_known_sch = fn_known;
++	cb.fn_unknown_sch = fn_unknown;
++	/* Process registered subchannels. */
++	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
++	if (rc)
++		goto out;
++	/* Process unregistered subchannels. */
++	if (fn_unknown)
++		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
++out:
++	idset_free(cb.set);
++
++	return rc;
++}
++
+ static struct subchannel *
+ css_alloc_subchannel(struct subchannel_id schid)
+ {
+@@ -77,7 +133,7 @@ css_alloc_subchannel(struct subchannel_id schid)
+ 	 * This is fine even on 64bit since the subchannel is always located
+ 	 * under 2G.
+ 	 */
+-	sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
++	sch->schib.pmcw.intparm = (u32)(addr_t)sch;
+ 	ret = cio_modify(sch);
+ 	if (ret) {
+ 		kfree(sch->lock);
+@@ -237,11 +293,25 @@ get_subchannel_by_schid(struct subchannel_id schid)
+ 	return dev ? to_subchannel(dev) : NULL;
+ }
+ 
++/**
++ * css_sch_is_valid() - check if a subchannel is valid
++ * @schib: subchannel information block for the subchannel
++ */
++int css_sch_is_valid(struct schib *schib)
++{
++	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
++		return 0;
++	return 1;
++}
++EXPORT_SYMBOL_GPL(css_sch_is_valid);
++
+ static int css_get_subchannel_status(struct subchannel *sch)
+ {
+ 	struct schib schib;
+ 
+-	if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
++	if (stsch(sch->schid, &schib))
++		return CIO_GONE;
++	if (!css_sch_is_valid(&schib))
+ 		return CIO_GONE;
+ 	if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
+ 		return CIO_REVALIDATE;
+@@ -293,7 +363,7 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
+ 		action = UNREGISTER;
+ 		if (sch->driver && sch->driver->notify) {
+ 			spin_unlock_irqrestore(sch->lock, flags);
+-			ret = sch->driver->notify(&sch->dev, event);
++			ret = sch->driver->notify(sch, event);
+ 			spin_lock_irqsave(sch->lock, flags);
+ 			if (ret)
+ 				action = NONE;
+@@ -349,7 +419,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
+ 		/* Will be done on the slow path. */
+ 		return -EAGAIN;
+ 	}
+-	if (stsch_err(schid, &schib) || !schib.pmcw.dnv) {
++	if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
+ 		/* Unusable - ignore. */
+ 		return 0;
+ 	}
+@@ -388,20 +458,56 @@ static int __init slow_subchannel_init(void)
+ 	return 0;
+ }
+ 
+-static void css_slow_path_func(struct work_struct *unused)
++static int slow_eval_known_fn(struct subchannel *sch, void *data)
+ {
+-	struct subchannel_id schid;
++	int eval;
++	int rc;
+ 
+-	CIO_TRACE_EVENT(4, "slowpath");
+ 	spin_lock_irq(&slow_subchannel_lock);
+-	init_subchannel_id(&schid);
+-	while (idset_sch_get_first(slow_subchannel_set, &schid)) {
+-		idset_sch_del(slow_subchannel_set, schid);
+-		spin_unlock_irq(&slow_subchannel_lock);
+-		css_evaluate_subchannel(schid, 1);
+-		spin_lock_irq(&slow_subchannel_lock);
++	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
++	idset_sch_del(slow_subchannel_set, sch->schid);
++	spin_unlock_irq(&slow_subchannel_lock);
++	if (eval) {
++		rc = css_evaluate_known_subchannel(sch, 1);
++		if (rc == -EAGAIN)
++			css_schedule_eval(sch->schid);
+ 	}
++	return 0;
++}
++
++static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
++{
++	int eval;
++	int rc = 0;
++
++	spin_lock_irq(&slow_subchannel_lock);
++	eval = idset_sch_contains(slow_subchannel_set, schid);
++	idset_sch_del(slow_subchannel_set, schid);
+ 	spin_unlock_irq(&slow_subchannel_lock);
++	if (eval) {
++		rc = css_evaluate_new_subchannel(schid, 1);
++		switch (rc) {
++		case -EAGAIN:
++			css_schedule_eval(schid);
++			rc = 0;
++			break;
++		case -ENXIO:
++		case -ENOMEM:
++		case -EIO:
++			/* These should abort looping */
++			break;
++		default:
++			rc = 0;
++		}
++	}
++	return rc;
++}
++
++static void css_slow_path_func(struct work_struct *unused)
++{
++	CIO_TRACE_EVENT(4, "slowpath");
++	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
++				   NULL);
+ }
+ 
+ static DECLARE_WORK(slow_path_work, css_slow_path_func);
+@@ -430,7 +536,6 @@ void css_schedule_eval_all(void)
+ /* Reprobe subchannel if unregistered. */
+ static int reprobe_subchannel(struct subchannel_id schid, void *data)
+ {
+-	struct subchannel *sch;
+ 	int ret;
+ 
+ 	CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
+@@ -438,13 +543,6 @@ static int reprobe_subchannel(struct subchannel_id schid, void *data)
+ 	if (need_reprobe)
+ 		return -EAGAIN;
+ 
+-	sch = get_subchannel_by_schid(schid);
+-	if (sch) {
+-		/* Already known. */
+-		put_device(&sch->dev);
+-		return 0;
+-	}
+-
+ 	ret = css_probe_device(schid);
+ 	switch (ret) {
+ 	case 0:
+@@ -472,7 +570,7 @@ static void reprobe_all(struct work_struct *unused)
+ 	/* Make sure initial subchannel scan is done. */
+ 	wait_event(ccw_device_init_wq,
+ 		   atomic_read(&ccw_device_init_count) == 0);
+-	ret = for_each_subchannel(reprobe_subchannel, NULL);
++	ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);
+ 
+ 	CIO_MSG_EVENT(2, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
+ 		      need_reprobe);
+@@ -787,8 +885,8 @@ int sch_is_pseudo_sch(struct subchannel *sch)
+ static int
+ css_bus_match (struct device *dev, struct device_driver *drv)
+ {
+-	struct subchannel *sch = container_of (dev, struct subchannel, dev);
+-	struct css_driver *driver = container_of (drv, struct css_driver, drv);
++	struct subchannel *sch = to_subchannel(dev);
++	struct css_driver *driver = to_cssdriver(drv);
+ 
+ 	if (sch->st == driver->subchannel_type)
+ 		return 1;
+@@ -796,32 +894,36 @@ css_bus_match (struct device *dev, struct device_driver *drv)
+ 	return 0;
+ }
+ 
+-static int
+-css_probe (struct device *dev)
++static int css_probe(struct device *dev)
+ {
+ 	struct subchannel *sch;
++	int ret;
+ 
+ 	sch = to_subchannel(dev);
+-	sch->driver = container_of (dev->driver, struct css_driver, drv);
+-	return (sch->driver->probe ? sch->driver->probe(sch) : 0);
++	sch->driver = to_cssdriver(dev->driver);
++	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
++	if (ret)
++		sch->driver = NULL;
++	return ret;
+ }
+ 
+-static int
+-css_remove (struct device *dev)
++static int css_remove(struct device *dev)
+ {
+ 	struct subchannel *sch;
++	int ret;
+ 
+ 	sch = to_subchannel(dev);
+-	return (sch->driver->remove ? sch->driver->remove(sch) : 0);
++	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
++	sch->driver = NULL;
++	return ret;
+ }
+ 
+-static void
+-css_shutdown (struct device *dev)
++static void css_shutdown(struct device *dev)
+ {
+ 	struct subchannel *sch;
+ 
+ 	sch = to_subchannel(dev);
+-	if (sch->driver->shutdown)
++	if (sch->driver && sch->driver->shutdown)
+ 		sch->driver->shutdown(sch);
+ }
+ 
+@@ -833,6 +935,34 @@ struct bus_type css_bus_type = {
+ 	.shutdown = css_shutdown,
+ };
+ 
++/**
++ * css_driver_register - register a css driver
++ * @cdrv: css driver to register
++ *
++ * This is mainly a wrapper around driver_register that sets name
++ * and bus_type in the embedded struct device_driver correctly.
++ */
++int css_driver_register(struct css_driver *cdrv)
++{
++	cdrv->drv.name = cdrv->name;
++	cdrv->drv.bus = &css_bus_type;
++	cdrv->drv.owner = cdrv->owner;
++	return driver_register(&cdrv->drv);
++}
++EXPORT_SYMBOL_GPL(css_driver_register);
++
++/**
++ * css_driver_unregister - unregister a css driver
++ * @cdrv: css driver to unregister
++ *
++ * This is a wrapper around driver_unregister.
++ */
++void css_driver_unregister(struct css_driver *cdrv)
++{
++	driver_unregister(&cdrv->drv);
++}
++EXPORT_SYMBOL_GPL(css_driver_unregister);
++
+ subsys_initcall(init_channel_subsystem);
+ 
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
+index 81215ef..b705545 100644
+--- a/drivers/s390/cio/css.h
++++ b/drivers/s390/cio/css.h
+@@ -58,64 +58,6 @@ struct pgid {
+ 	__u32 tod_high;		/* high word TOD clock */
+ } __attribute__ ((packed));
+ 
+-#define MAX_CIWS 8
+-
+-/*
+- * sense-id response buffer layout
+- */
+-struct senseid {
+-	/* common part */
+-	__u8  reserved;     	/* always 0x'FF' */
+-	__u16 cu_type;	     	/* control unit type */
+-	__u8  cu_model;     	/* control unit model */
+-	__u16 dev_type;     	/* device type */
+-	__u8  dev_model;    	/* device model */
+-	__u8  unused;	     	/* padding byte */
+-	/* extended part */
+-	struct ciw ciw[MAX_CIWS];	/* variable # of CIWs */
+-}  __attribute__ ((packed,aligned(4)));
+-
+-struct ccw_device_private {
+-	struct ccw_device *cdev;
+-	struct subchannel *sch;
+-	int state;		/* device state */
+-	atomic_t onoff;
+-	unsigned long registered;
+-	struct ccw_dev_id dev_id;	/* device id */
+-	struct subchannel_id schid;	/* subchannel number */
+-	__u8 imask;		/* lpm mask for SNID/SID/SPGID */
+-	int iretry;		/* retry counter SNID/SID/SPGID */
+-	struct {
+-		unsigned int fast:1;	/* post with "channel end" */
+-		unsigned int repall:1;	/* report every interrupt status */
+-		unsigned int pgroup:1;  /* do path grouping */
+-		unsigned int force:1;   /* allow forced online */
+-	} __attribute__ ((packed)) options;
+-	struct {
+-		unsigned int pgid_single:1; /* use single path for Set PGID */
+-		unsigned int esid:1;        /* Ext. SenseID supported by HW */
+-		unsigned int dosense:1;	    /* delayed SENSE required */
+-		unsigned int doverify:1;    /* delayed path verification */
+-		unsigned int donotify:1;    /* call notify function */
+-		unsigned int recog_done:1;  /* dev. recog. complete */
+-		unsigned int fake_irb:1;    /* deliver faked irb */
+-		unsigned int intretry:1;    /* retry internal operation */
+-	} __attribute__((packed)) flags;
+-	unsigned long intparm;	/* user interruption parameter */
+-	struct qdio_irq *qdio_data;
+-	struct irb irb;		/* device status */
+-	struct senseid senseid;	/* SenseID info */
+-	struct pgid pgid[8];	/* path group IDs per chpid*/
+-	struct ccw1 iccws[2];	/* ccws for SNID/SID/SPGID commands */
+-	struct work_struct kick_work;
+-	wait_queue_head_t wait_q;
+-	struct timer_list timer;
+-	void *cmb;			/* measurement information */
+-	struct list_head cmb_list;	/* list of measured devices */
+-	u64 cmb_start_time;		/* clock value of cmb reset */
+-	void *cmb_wait;			/* deferred cmb enable/disable */
+-};
+-
+ /*
+  * A css driver handles all subchannels of one type.
+  * Currently, we only care about I/O subchannels (type 0), these
+@@ -123,25 +65,35 @@ struct ccw_device_private {
+  */
+ struct subchannel;
+ struct css_driver {
++	struct module *owner;
+ 	unsigned int subchannel_type;
+ 	struct device_driver drv;
+-	void (*irq)(struct device *);
+-	int (*notify)(struct device *, int);
+-	void (*verify)(struct device *);
+-	void (*termination)(struct device *);
++	void (*irq)(struct subchannel *);
++	int (*notify)(struct subchannel *, int);
++	void (*verify)(struct subchannel *);
++	void (*termination)(struct subchannel *);
+ 	int (*probe)(struct subchannel *);
+ 	int (*remove)(struct subchannel *);
+ 	void (*shutdown)(struct subchannel *);
++	const char *name;
+ };
+ 
++#define to_cssdriver(n) container_of(n, struct css_driver, drv)
++
+ /*
+  * all css_drivers have the css_bus_type
+  */
+ extern struct bus_type css_bus_type;
+ 
++extern int css_driver_register(struct css_driver *);
++extern void css_driver_unregister(struct css_driver *);
++
+ extern void css_sch_device_unregister(struct subchannel *);
+ extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
+ extern int css_init_done;
++int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
++			       int (*fn_unknown)(struct subchannel_id,
++			       void *), void *data);
+ extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
+ extern void css_process_crw(int, int);
+ extern void css_reiterate_subchannels(void);
+@@ -188,6 +140,8 @@ void css_schedule_eval(struct subchannel_id schid);
+ void css_schedule_eval_all(void);
+ 
+ int sch_is_pseudo_sch(struct subchannel *);
++struct schib;
++int css_sch_is_valid(struct schib *);
+ 
+ extern struct workqueue_struct *slow_path_wq;
+ 
+diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
+index 74f6b53..d35dc3f 100644
+--- a/drivers/s390/cio/device.c
++++ b/drivers/s390/cio/device.c
+@@ -17,6 +17,7 @@
+ #include <linux/list.h>
+ #include <linux/device.h>
+ #include <linux/workqueue.h>
++#include <linux/timer.h>
+ 
+ #include <asm/ccwdev.h>
+ #include <asm/cio.h>
+@@ -28,6 +29,12 @@
+ #include "css.h"
+ #include "device.h"
+ #include "ioasm.h"
++#include "io_sch.h"
++
++static struct timer_list recovery_timer;
++static spinlock_t recovery_lock;
++static int recovery_phase;
++static const unsigned long recovery_delay[] = { 3, 30, 300 };
+ 
+ /******************* bus type handling ***********************/
+ 
+@@ -115,19 +122,18 @@ static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
+ 
+ struct bus_type ccw_bus_type;
+ 
+-static int io_subchannel_probe (struct subchannel *);
+-static int io_subchannel_remove (struct subchannel *);
+-static int io_subchannel_notify(struct device *, int);
+-static void io_subchannel_verify(struct device *);
+-static void io_subchannel_ioterm(struct device *);
++static void io_subchannel_irq(struct subchannel *);
++static int io_subchannel_probe(struct subchannel *);
++static int io_subchannel_remove(struct subchannel *);
++static int io_subchannel_notify(struct subchannel *, int);
++static void io_subchannel_verify(struct subchannel *);
++static void io_subchannel_ioterm(struct subchannel *);
+ static void io_subchannel_shutdown(struct subchannel *);
+ 
+ static struct css_driver io_subchannel_driver = {
++	.owner = THIS_MODULE,
+ 	.subchannel_type = SUBCHANNEL_TYPE_IO,
+-	.drv = {
+-		.name = "io_subchannel",
+-		.bus  = &css_bus_type,
+-	},
++	.name = "io_subchannel",
+ 	.irq = io_subchannel_irq,
+ 	.notify = io_subchannel_notify,
+ 	.verify = io_subchannel_verify,
+@@ -142,6 +148,8 @@ struct workqueue_struct *ccw_device_notify_work;
+ wait_queue_head_t ccw_device_init_wq;
+ atomic_t ccw_device_init_count;
+ 
++static void recovery_func(unsigned long data);
++
+ static int __init
+ init_ccw_bus_type (void)
+ {
+@@ -149,6 +157,7 @@ init_ccw_bus_type (void)
+ 
+ 	init_waitqueue_head(&ccw_device_init_wq);
+ 	atomic_set(&ccw_device_init_count, 0);
++	setup_timer(&recovery_timer, recovery_func, 0);
+ 
+ 	ccw_device_work = create_singlethread_workqueue("cio");
+ 	if (!ccw_device_work)
+@@ -166,7 +175,8 @@ init_ccw_bus_type (void)
+ 	if ((ret = bus_register (&ccw_bus_type)))
+ 		goto out_err;
+ 
+-	if ((ret = driver_register(&io_subchannel_driver.drv)))
++	ret = css_driver_register(&io_subchannel_driver);
++	if (ret)
+ 		goto out_err;
+ 
+ 	wait_event(ccw_device_init_wq,
+@@ -186,7 +196,7 @@ out_err:
+ static void __exit
+ cleanup_ccw_bus_type (void)
+ {
+-	driver_unregister(&io_subchannel_driver.drv);
++	css_driver_unregister(&io_subchannel_driver);
+ 	bus_unregister(&ccw_bus_type);
+ 	destroy_workqueue(ccw_device_notify_work);
+ 	destroy_workqueue(ccw_device_work);
+@@ -773,7 +783,7 @@ static void sch_attach_device(struct subchannel *sch,
+ {
+ 	css_update_ssd_info(sch);
+ 	spin_lock_irq(sch->lock);
+-	sch->dev.driver_data = cdev;
++	sch_set_cdev(sch, cdev);
+ 	cdev->private->schid = sch->schid;
+ 	cdev->ccwlock = sch->lock;
+ 	device_trigger_reprobe(sch);
+@@ -795,7 +805,7 @@ static void sch_attach_disconnected_device(struct subchannel *sch,
+ 		put_device(&other_sch->dev);
+ 		return;
+ 	}
+-	other_sch->dev.driver_data = NULL;
++	sch_set_cdev(other_sch, NULL);
+ 	/* No need to keep a subchannel without ccw device around. */
+ 	css_sch_device_unregister(other_sch);
+ 	put_device(&other_sch->dev);
+@@ -831,12 +841,12 @@ static void sch_create_and_recog_new_device(struct subchannel *sch)
+ 		return;
+ 	}
+ 	spin_lock_irq(sch->lock);
+-	sch->dev.driver_data = cdev;
++	sch_set_cdev(sch, cdev);
+ 	spin_unlock_irq(sch->lock);
+ 	/* Start recognition for the new ccw device. */
+ 	if (io_subchannel_recog(cdev, sch)) {
+ 		spin_lock_irq(sch->lock);
+-		sch->dev.driver_data = NULL;
++		sch_set_cdev(sch, NULL);
+ 		spin_unlock_irq(sch->lock);
+ 		if (cdev->dev.release)
+ 			cdev->dev.release(&cdev->dev);
+@@ -940,7 +950,7 @@ io_subchannel_register(struct work_struct *work)
+ 			      cdev->private->dev_id.devno, ret);
+ 		put_device(&cdev->dev);
+ 		spin_lock_irqsave(sch->lock, flags);
+-		sch->dev.driver_data = NULL;
++		sch_set_cdev(sch, NULL);
+ 		spin_unlock_irqrestore(sch->lock, flags);
+ 		kfree (cdev->private);
+ 		kfree (cdev);
+@@ -1022,7 +1032,7 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
+ 	int rc;
+ 	struct ccw_device_private *priv;
+ 
+-	sch->dev.driver_data = cdev;
++	sch_set_cdev(sch, cdev);
+ 	sch->driver = &io_subchannel_driver;
+ 	cdev->ccwlock = sch->lock;
+ 
+@@ -1082,7 +1092,7 @@ static void ccw_device_move_to_sch(struct work_struct *work)
+ 	}
+ 	if (former_parent) {
+ 		spin_lock_irq(former_parent->lock);
+-		former_parent->dev.driver_data = NULL;
++		sch_set_cdev(former_parent, NULL);
+ 		spin_unlock_irq(former_parent->lock);
+ 		css_sch_device_unregister(former_parent);
+ 		/* Reset intparm to zeroes. */
+@@ -1096,6 +1106,18 @@ out:
+ 	put_device(&cdev->dev);
+ }
+ 
++static void io_subchannel_irq(struct subchannel *sch)
++{
++	struct ccw_device *cdev;
++
++	cdev = sch_get_cdev(sch);
++
++	CIO_TRACE_EVENT(3, "IRQ");
++	CIO_TRACE_EVENT(3, sch->dev.bus_id);
++	if (cdev)
++		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
++}
++
+ static int
+ io_subchannel_probe (struct subchannel *sch)
+ {
+@@ -1104,13 +1126,13 @@ io_subchannel_probe (struct subchannel *sch)
+ 	unsigned long flags;
+ 	struct ccw_dev_id dev_id;
+ 
+-	if (sch->dev.driver_data) {
++	cdev = sch_get_cdev(sch);
++	if (cdev) {
+ 		/*
+ 		 * This subchannel already has an associated ccw_device.
+ 		 * Register it and exit. This happens for all early
+ 		 * device, e.g. the console.
+ 		 */
+-		cdev = sch->dev.driver_data;
+ 		cdev->dev.groups = ccwdev_attr_groups;
+ 		device_initialize(&cdev->dev);
+ 		ccw_device_register(cdev);
+@@ -1132,6 +1154,11 @@ io_subchannel_probe (struct subchannel *sch)
+ 	 */
+ 	dev_id.devno = sch->schib.pmcw.dev;
+ 	dev_id.ssid = sch->schid.ssid;
++	/* Allocate I/O subchannel private data. */
++	sch->private = kzalloc(sizeof(struct io_subchannel_private),
++			       GFP_KERNEL | GFP_DMA);
++	if (!sch->private)
++		return -ENOMEM;
+ 	cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
+ 	if (!cdev)
+ 		cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
+@@ -1149,16 +1176,18 @@ io_subchannel_probe (struct subchannel *sch)
+ 		return 0;
+ 	}
+ 	cdev = io_subchannel_create_ccwdev(sch);
+-	if (IS_ERR(cdev))
++	if (IS_ERR(cdev)) {
++		kfree(sch->private);
+ 		return PTR_ERR(cdev);
+-
++	}
+ 	rc = io_subchannel_recog(cdev, sch);
+ 	if (rc) {
+ 		spin_lock_irqsave(sch->lock, flags);
+-		sch->dev.driver_data = NULL;
++		sch_set_cdev(sch, NULL);
+ 		spin_unlock_irqrestore(sch->lock, flags);
+ 		if (cdev->dev.release)
+ 			cdev->dev.release(&cdev->dev);
++		kfree(sch->private);
+ 	}
+ 
+ 	return rc;
+@@ -1170,25 +1199,25 @@ io_subchannel_remove (struct subchannel *sch)
+ 	struct ccw_device *cdev;
+ 	unsigned long flags;
+ 
+-	if (!sch->dev.driver_data)
++	cdev = sch_get_cdev(sch);
++	if (!cdev)
+ 		return 0;
+-	cdev = sch->dev.driver_data;
+ 	/* Set ccw device to not operational and drop reference. */
+ 	spin_lock_irqsave(cdev->ccwlock, flags);
+-	sch->dev.driver_data = NULL;
++	sch_set_cdev(sch, NULL);
+ 	cdev->private->state = DEV_STATE_NOT_OPER;
+ 	spin_unlock_irqrestore(cdev->ccwlock, flags);
+ 	ccw_device_unregister(cdev);
+ 	put_device(&cdev->dev);
++	kfree(sch->private);
+ 	return 0;
+ }
+ 
+-static int
+-io_subchannel_notify(struct device *dev, int event)
++static int io_subchannel_notify(struct subchannel *sch, int event)
+ {
+ 	struct ccw_device *cdev;
+ 
+-	cdev = dev->driver_data;
++	cdev = sch_get_cdev(sch);
+ 	if (!cdev)
+ 		return 0;
+ 	if (!cdev->drv)
+@@ -1198,22 +1227,20 @@ io_subchannel_notify(struct device *dev, int event)
+ 	return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
+ }
+ 
+-static void
+-io_subchannel_verify(struct device *dev)
++static void io_subchannel_verify(struct subchannel *sch)
  {
- 	int error = 0;
- 	if (drv->probe != NULL)
--		error = sysfs_create_file(&drv->drv.kobj,
--					  &driver_attr_new_id.attr);
-+		error = driver_create_file(&drv->drv, &driver_attr_new_id);
- 	return error;
+ 	struct ccw_device *cdev;
+ 
+-	cdev = dev->driver_data;
++	cdev = sch_get_cdev(sch);
+ 	if (cdev)
+ 		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
  }
  
-diff --git a/drivers/power/apm_power.c b/drivers/power/apm_power.c
-index bbf3ee1..7e29b90 100644
---- a/drivers/power/apm_power.c
-+++ b/drivers/power/apm_power.c
-@@ -13,6 +13,7 @@
- #include <linux/power_supply.h>
- #include <linux/apm-emulation.h>
+-static void
+-io_subchannel_ioterm(struct device *dev)
++static void io_subchannel_ioterm(struct subchannel *sch)
+ {
+ 	struct ccw_device *cdev;
  
-+static DEFINE_MUTEX(apm_mutex);
- #define PSY_PROP(psy, prop, val) psy->get_property(psy, \
- 			 POWER_SUPPLY_PROP_##prop, val)
+-	cdev = dev->driver_data;
++	cdev = sch_get_cdev(sch);
+ 	if (!cdev)
+ 		return;
+ 	/* Internal I/O will be retried by the interrupt handler. */
+@@ -1231,7 +1258,7 @@ io_subchannel_shutdown(struct subchannel *sch)
+ 	struct ccw_device *cdev;
+ 	int ret;
  
-@@ -23,67 +24,86 @@
+-	cdev = sch->dev.driver_data;
++	cdev = sch_get_cdev(sch);
  
- static struct power_supply *main_battery;
+ 	if (cio_is_console(sch->schid))
+ 		return;
+@@ -1271,6 +1298,9 @@ ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch)
+ {
+ 	int rc;
  
--static void find_main_battery(void)
--{
--	struct device *dev;
--	struct power_supply *bat = NULL;
--	struct power_supply *max_charge_bat = NULL;
--	struct power_supply *max_energy_bat = NULL;
-+struct find_bat_param {
-+	struct power_supply *main;
-+	struct power_supply *bat;
-+	struct power_supply *max_charge_bat;
-+	struct power_supply *max_energy_bat;
- 	union power_supply_propval full;
--	int max_charge = 0;
--	int max_energy = 0;
-+	int max_charge;
-+	int max_energy;
-+};
++	/* Attach subchannel private data. */
++	sch->private = cio_get_console_priv();
++	memset(sch->private, 0, sizeof(struct io_subchannel_private));
+ 	/* Initialize the ccw_device structure. */
+ 	cdev->dev.parent= &sch->dev;
+ 	rc = io_subchannel_recog(cdev, sch);
+@@ -1456,6 +1486,7 @@ int ccw_driver_register(struct ccw_driver *cdriver)
  
--	main_battery = NULL;
-+static int __find_main_battery(struct device *dev, void *data)
-+{
-+	struct find_bat_param *bp = (struct find_bat_param *)data;
+ 	drv->bus = &ccw_bus_type;
+ 	drv->name = cdriver->name;
++	drv->owner = cdriver->owner;
  
--	list_for_each_entry(dev, &power_supply_class->devices, node) {
--		bat = dev_get_drvdata(dev);
-+	bp->bat = dev_get_drvdata(dev);
+ 	return driver_register(drv);
+ }
+@@ -1481,6 +1512,60 @@ ccw_device_get_subchannel_id(struct ccw_device *cdev)
+ 	return sch->schid;
+ }
  
--		if (bat->use_for_apm) {
--			/* nice, we explicitly asked to report this battery. */
--			main_battery = bat;
--			return;
--		}
-+	if (bp->bat->use_for_apm) {
-+		/* nice, we explicitly asked to report this battery. */
-+		bp->main = bp->bat;
-+		return 1;
++static int recovery_check(struct device *dev, void *data)
++{
++	struct ccw_device *cdev = to_ccwdev(dev);
++	int *redo = data;
++
++	spin_lock_irq(cdev->ccwlock);
++	switch (cdev->private->state) {
++	case DEV_STATE_DISCONNECTED:
++		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
++			      cdev->private->dev_id.ssid,
++			      cdev->private->dev_id.devno);
++		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
++		*redo = 1;
++		break;
++	case DEV_STATE_DISCONNECTED_SENSE_ID:
++		*redo = 1;
++		break;
 +	}
- 
--		if (!PSY_PROP(bat, CHARGE_FULL_DESIGN, &full) ||
--				!PSY_PROP(bat, CHARGE_FULL, &full)) {
--			if (full.intval > max_charge) {
--				max_charge_bat = bat;
--				max_charge = full.intval;
--			}
--		} else if (!PSY_PROP(bat, ENERGY_FULL_DESIGN, &full) ||
--				!PSY_PROP(bat, ENERGY_FULL, &full)) {
--			if (full.intval > max_energy) {
--				max_energy_bat = bat;
--				max_energy = full.intval;
--			}
-+	if (!PSY_PROP(bp->bat, CHARGE_FULL_DESIGN, &bp->full) ||
-+			!PSY_PROP(bp->bat, CHARGE_FULL, &bp->full)) {
-+		if (bp->full.intval > bp->max_charge) {
-+			bp->max_charge_bat = bp->bat;
-+			bp->max_charge = bp->full.intval;
-+		}
-+	} else if (!PSY_PROP(bp->bat, ENERGY_FULL_DESIGN, &bp->full) ||
-+			!PSY_PROP(bp->bat, ENERGY_FULL, &bp->full)) {
-+		if (bp->full.intval > bp->max_energy) {
-+			bp->max_energy_bat = bp->bat;
-+			bp->max_energy = bp->full.intval;
- 		}
- 	}
++	spin_unlock_irq(cdev->ccwlock);
++
 +	return 0;
 +}
 +
-+static void find_main_battery(void)
++static void recovery_func(unsigned long data)
 +{
-+	struct find_bat_param bp;
-+	int error;
++	int redo = 0;
 +
-+	memset(&bp, 0, sizeof(struct find_bat_param));
-+	main_battery = NULL;
-+	bp.main = main_battery;
++	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
++	if (redo) {
++		spin_lock_irq(&recovery_lock);
++		if (!timer_pending(&recovery_timer)) {
++			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
++				recovery_phase++;
++			mod_timer(&recovery_timer, jiffies +
++				  recovery_delay[recovery_phase] * HZ);
++		}
++		spin_unlock_irq(&recovery_lock);
++	} else
++		CIO_MSG_EVENT(2, "recovery: end\n");
++}
 +
-+	error = class_for_each_device(power_supply_class, &bp,
-+				      __find_main_battery);
-+	if (error) {
-+		main_battery = bp.main;
-+		return;
++void ccw_device_schedule_recovery(void)
++{
++	unsigned long flags;
++
++	CIO_MSG_EVENT(2, "recovery: schedule\n");
++	spin_lock_irqsave(&recovery_lock, flags);
++	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
++		recovery_phase = 0;
++		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
 +	}
++	spin_unlock_irqrestore(&recovery_lock, flags);
++}
++
+ MODULE_LICENSE("GPL");
+ EXPORT_SYMBOL(ccw_device_set_online);
+ EXPORT_SYMBOL(ccw_device_set_offline);
+diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
+index 0d40896..d40a2ff 100644
+--- a/drivers/s390/cio/device.h
++++ b/drivers/s390/cio/device.h
+@@ -5,6 +5,8 @@
+ #include <asm/atomic.h>
+ #include <linux/wait.h>
  
--	if ((max_energy_bat && max_charge_bat) &&
--			(max_energy_bat != max_charge_bat)) {
-+	if ((bp.max_energy_bat && bp.max_charge_bat) &&
-+			(bp.max_energy_bat != bp.max_charge_bat)) {
- 		/* try guess battery with more capacity */
--		if (!PSY_PROP(max_charge_bat, VOLTAGE_MAX_DESIGN, &full)) {
--			if (max_energy > max_charge * full.intval)
--				main_battery = max_energy_bat;
-+		if (!PSY_PROP(bp.max_charge_bat, VOLTAGE_MAX_DESIGN,
-+			      &bp.full)) {
-+			if (bp.max_energy > bp.max_charge * bp.full.intval)
-+				main_battery = bp.max_energy_bat;
- 			else
--				main_battery = max_charge_bat;
--		} else if (!PSY_PROP(max_energy_bat, VOLTAGE_MAX_DESIGN,
--								  &full)) {
--			if (max_charge > max_energy / full.intval)
--				main_battery = max_charge_bat;
-+				main_battery = bp.max_charge_bat;
-+		} else if (!PSY_PROP(bp.max_energy_bat, VOLTAGE_MAX_DESIGN,
-+								  &bp.full)) {
-+			if (bp.max_charge > bp.max_energy / bp.full.intval)
-+				main_battery = bp.max_charge_bat;
- 			else
--				main_battery = max_energy_bat;
-+				main_battery = bp.max_energy_bat;
- 		} else {
- 			/* give up, choice any */
--			main_battery = max_energy_bat;
-+			main_battery = bp.max_energy_bat;
- 		}
--	} else if (max_charge_bat) {
--		main_battery = max_charge_bat;
--	} else if (max_energy_bat) {
--		main_battery = max_energy_bat;
-+	} else if (bp.max_charge_bat) {
-+		main_battery = bp.max_charge_bat;
-+	} else if (bp.max_energy_bat) {
-+		main_battery = bp.max_energy_bat;
- 	} else {
- 		/* give up, try the last if any */
--		main_battery = bat;
-+		main_battery = bp.bat;
- 	}
- }
++#include "io_sch.h"
++
+ /*
+  * states of the device statemachine
+  */
+@@ -74,7 +76,6 @@ extern struct workqueue_struct *ccw_device_notify_work;
+ extern wait_queue_head_t ccw_device_init_wq;
+ extern atomic_t ccw_device_init_count;
  
-@@ -207,10 +227,10 @@ static void apm_battery_apm_get_power_status(struct apm_power_info *info)
- 	union power_supply_propval status;
- 	union power_supply_propval capacity, time_to_full, time_to_empty;
+-void io_subchannel_irq (struct device *pdev);
+ void io_subchannel_recog_done(struct ccw_device *cdev);
  
--	down(&power_supply_class->sem);
-+	mutex_lock(&apm_mutex);
- 	find_main_battery();
- 	if (!main_battery) {
--		up(&power_supply_class->sem);
-+		mutex_unlock(&apm_mutex);
- 		return;
- 	}
+ int ccw_device_cancel_halt_clear(struct ccw_device *);
+@@ -87,6 +88,8 @@ int ccw_device_recognition(struct ccw_device *);
+ int ccw_device_online(struct ccw_device *);
+ int ccw_device_offline(struct ccw_device *);
  
-@@ -278,7 +298,7 @@ static void apm_battery_apm_get_power_status(struct apm_power_info *info)
- 		}
- 	}
++void ccw_device_schedule_recovery(void);
++
+ /* Function prototypes for device status and basic sense stuff. */
+ void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
+ void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
+diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
+index bfad421..4b92c84 100644
+--- a/drivers/s390/cio/device_fsm.c
++++ b/drivers/s390/cio/device_fsm.c
+@@ -25,14 +25,16 @@
+ #include "ioasm.h"
+ #include "chp.h"
  
--	up(&power_supply_class->sem);
-+	mutex_unlock(&apm_mutex);
++static int timeout_log_enabled;
++
+ int
+ device_is_online(struct subchannel *sch)
+ {
+ 	struct ccw_device *cdev;
+ 
+-	if (!sch->dev.driver_data)
++	cdev = sch_get_cdev(sch);
++	if (!cdev)
+ 		return 0;
+-	cdev = sch->dev.driver_data;
+ 	return (cdev->private->state == DEV_STATE_ONLINE);
  }
  
- static int __init apm_battery_init(void)
-diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
-index a63b75c..03d6a38 100644
---- a/drivers/power/power_supply_core.c
-+++ b/drivers/power/power_supply_core.c
-@@ -20,28 +20,29 @@
+@@ -41,9 +43,9 @@ device_is_disconnected(struct subchannel *sch)
+ {
+ 	struct ccw_device *cdev;
  
- struct class *power_supply_class;
+-	if (!sch->dev.driver_data)
++	cdev = sch_get_cdev(sch);
++	if (!cdev)
+ 		return 0;
+-	cdev = sch->dev.driver_data;
+ 	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
+ 		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
+ }
+@@ -53,19 +55,21 @@ device_set_disconnected(struct subchannel *sch)
+ {
+ 	struct ccw_device *cdev;
  
-+static int __power_supply_changed_work(struct device *dev, void *data)
+-	if (!sch->dev.driver_data)
++	cdev = sch_get_cdev(sch);
++	if (!cdev)
+ 		return;
+-	cdev = sch->dev.driver_data;
+ 	ccw_device_set_timeout(cdev, 0);
+ 	cdev->private->flags.fake_irb = 0;
+ 	cdev->private->state = DEV_STATE_DISCONNECTED;
++	if (cdev->online)
++		ccw_device_schedule_recovery();
+ }
+ 
+ void device_set_intretry(struct subchannel *sch)
+ {
+ 	struct ccw_device *cdev;
+ 
+-	cdev = sch->dev.driver_data;
++	cdev = sch_get_cdev(sch);
+ 	if (!cdev)
+ 		return;
+ 	cdev->private->flags.intretry = 1;
+@@ -75,13 +79,62 @@ int device_trigger_verify(struct subchannel *sch)
+ {
+ 	struct ccw_device *cdev;
+ 
+-	cdev = sch->dev.driver_data;
++	cdev = sch_get_cdev(sch);
+ 	if (!cdev || !cdev->online)
+ 		return -EINVAL;
+ 	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+ 	return 0;
+ }
+ 
++static int __init ccw_timeout_log_setup(char *unused)
 +{
-+	struct power_supply *psy = (struct power_supply *)data;
-+	struct power_supply *pst = dev_get_drvdata(dev);
-+	int i;
++	timeout_log_enabled = 1;
++	return 1;
++}
 +
-+	for (i = 0; i < psy->num_supplicants; i++)
-+		if (!strcmp(psy->supplied_to[i], pst->name)) {
-+			if (pst->external_power_changed)
-+				pst->external_power_changed(pst);
-+		}
-+	return 0;
++__setup("ccw_timeout_log", ccw_timeout_log_setup);
++
++static void ccw_timeout_log(struct ccw_device *cdev)
++{
++	struct schib schib;
++	struct subchannel *sch;
++	struct io_subchannel_private *private;
++	int cc;
++
++	sch = to_subchannel(cdev->dev.parent);
++	private = to_io_private(sch);
++	cc = stsch(sch->schid, &schib);
++
++	printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
++	       "device information:\n", get_clock());
++	printk(KERN_WARNING "cio: orb:\n");
++	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
++		       &private->orb, sizeof(private->orb), 0);
++	printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id);
++	printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id);
++	printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
++	       "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);
++
++	if ((void *)(addr_t)private->orb.cpa == &private->sense_ccw ||
++	    (void *)(addr_t)private->orb.cpa == cdev->private->iccws)
++		printk(KERN_WARNING "cio: last channel program (intern):\n");
++	else
++		printk(KERN_WARNING "cio: last channel program:\n");
++
++	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
++		       (void *)(addr_t)private->orb.cpa,
++		       sizeof(struct ccw1), 0);
++	printk(KERN_WARNING "cio: ccw device state: %d\n",
++	       cdev->private->state);
++	printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
++	printk(KERN_WARNING "cio: schib:\n");
++	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
++		       &schib, sizeof(schib), 0);
++	printk(KERN_WARNING "cio: ccw device flags:\n");
++	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
++		       &cdev->private->flags, sizeof(cdev->private->flags), 0);
 +}
 +
- static void power_supply_changed_work(struct work_struct *work)
+ /*
+  * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
+  */
+@@ -92,6 +145,8 @@ ccw_device_timeout(unsigned long data)
+ 
+ 	cdev = (struct ccw_device *) data;
+ 	spin_lock_irq(cdev->ccwlock);
++	if (timeout_log_enabled)
++		ccw_timeout_log(cdev);
+ 	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
+ 	spin_unlock_irq(cdev->ccwlock);
+ }
+@@ -122,9 +177,9 @@ device_kill_pending_timer(struct subchannel *sch)
  {
- 	struct power_supply *psy = container_of(work, struct power_supply,
- 						changed_work);
--	int i;
+ 	struct ccw_device *cdev;
  
- 	dev_dbg(psy->dev, "%s\n", __FUNCTION__);
+-	if (!sch->dev.driver_data)
++	cdev = sch_get_cdev(sch);
++	if (!cdev)
+ 		return;
+-	cdev = sch->dev.driver_data;
+ 	ccw_device_set_timeout(cdev, 0);
+ }
  
--	for (i = 0; i < psy->num_supplicants; i++) {
--		struct device *dev;
--
--		down(&power_supply_class->sem);
--		list_for_each_entry(dev, &power_supply_class->devices, node) {
--			struct power_supply *pst = dev_get_drvdata(dev);
--
--			if (!strcmp(psy->supplied_to[i], pst->name)) {
--				if (pst->external_power_changed)
--					pst->external_power_changed(pst);
--			}
--		}
--		up(&power_supply_class->sem);
--	}
-+	class_for_each_device(power_supply_class, psy,
-+			      __power_supply_changed_work);
+@@ -268,7 +323,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
+ 	switch (state) {
+ 	case DEV_STATE_NOT_OPER:
+ 		CIO_DEBUG(KERN_WARNING, 2,
+-			  "cio: SenseID : unknown device %04x on subchannel "
++			  "SenseID : unknown device %04x on subchannel "
+ 			  "0.%x.%04x\n", cdev->private->dev_id.devno,
+ 			  sch->schid.ssid, sch->schid.sch_no);
+ 		break;
+@@ -294,7 +349,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
+ 		}
+ 		/* Issue device info message. */
+ 		CIO_DEBUG(KERN_INFO, 2,
+-			  "cio: SenseID : device 0.%x.%04x reports: "
++			  "SenseID : device 0.%x.%04x reports: "
+ 			  "CU  Type/Mod = %04X/%02X, Dev Type/Mod = "
+ 			  "%04X/%02X\n",
+ 			  cdev->private->dev_id.ssid,
+@@ -304,7 +359,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
+ 		break;
+ 	case DEV_STATE_BOXED:
+ 		CIO_DEBUG(KERN_WARNING, 2,
+-			  "cio: SenseID : boxed device %04x on subchannel "
++			  "SenseID : boxed device %04x on subchannel "
+ 			  "0.%x.%04x\n", cdev->private->dev_id.devno,
+ 			  sch->schid.ssid, sch->schid.sch_no);
+ 		break;
+@@ -349,7 +404,7 @@ ccw_device_oper_notify(struct work_struct *work)
+ 	sch = to_subchannel(cdev->dev.parent);
+ 	if (sch->driver && sch->driver->notify) {
+ 		spin_unlock_irqrestore(cdev->ccwlock, flags);
+-		ret = sch->driver->notify(&sch->dev, CIO_OPER);
++		ret = sch->driver->notify(sch, CIO_OPER);
+ 		spin_lock_irqsave(cdev->ccwlock, flags);
+ 	} else
+ 		ret = 0;
+@@ -389,7 +444,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
  
- 	power_supply_update_leds(psy);
+ 	if (state == DEV_STATE_BOXED)
+ 		CIO_DEBUG(KERN_WARNING, 2,
+-			  "cio: Boxed device %04x on subchannel %04x\n",
++			  "Boxed device %04x on subchannel %04x\n",
+ 			  cdev->private->dev_id.devno, sch->schid.sch_no);
+ 
+ 	if (cdev->private->flags.donotify) {
+@@ -500,7 +555,8 @@ ccw_device_recognition(struct ccw_device *cdev)
+ 	    (cdev->private->state != DEV_STATE_BOXED))
+ 		return -EINVAL;
+ 	sch = to_subchannel(cdev->dev.parent);
+-	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
++	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc,
++				    (u32)(addr_t)sch);
+ 	if (ret != 0)
+ 		/* Couldn't enable the subchannel for i/o. Sick device. */
+ 		return ret;
+@@ -587,9 +643,10 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
+ 	default:
+ 		/* Reset oper notify indication after verify error. */
+ 		cdev->private->flags.donotify = 0;
+-		if (cdev->online)
++		if (cdev->online) {
++			ccw_device_set_timeout(cdev, 0);
+ 			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
+-		else
++		} else
+ 			ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+ 		break;
+ 	}
+@@ -610,7 +667,8 @@ ccw_device_online(struct ccw_device *cdev)
+ 	sch = to_subchannel(cdev->dev.parent);
+ 	if (css_init_done && !get_device(&cdev->dev))
+ 		return -ENODEV;
+-	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
++	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc,
++				    (u32)(addr_t)sch);
+ 	if (ret != 0) {
+ 		/* Couldn't enable the subchannel for i/o. Sick device. */
+ 		if (ret == -ENODEV)
+@@ -937,7 +995,7 @@ void device_kill_io(struct subchannel *sch)
+ 	int ret;
+ 	struct ccw_device *cdev;
  
-@@ -55,32 +56,35 @@ void power_supply_changed(struct power_supply *psy)
- 	schedule_work(&psy->changed_work);
- }
+-	cdev = sch->dev.driver_data;
++	cdev = sch_get_cdev(sch);
+ 	ret = ccw_device_cancel_halt_clear(cdev);
+ 	if (ret == -EBUSY) {
+ 		ccw_device_set_timeout(cdev, 3*HZ);
+@@ -990,7 +1048,8 @@ ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
+ 	struct subchannel *sch;
+ 
+ 	sch = to_subchannel(cdev->dev.parent);
+-	if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0)
++	if (cio_enable_subchannel(sch, sch->schib.pmcw.isc,
++				  (u32)(addr_t)sch) != 0)
+ 		/* Couldn't enable the subchannel for i/o. Sick device. */
+ 		return;
  
--int power_supply_am_i_supplied(struct power_supply *psy)
-+static int __power_supply_am_i_supplied(struct device *dev, void *data)
+@@ -1006,9 +1065,9 @@ device_trigger_reprobe(struct subchannel *sch)
  {
- 	union power_supply_propval ret = {0,};
--	struct device *dev;
+ 	struct ccw_device *cdev;
+ 
+-	if (!sch->dev.driver_data)
++	cdev = sch_get_cdev(sch);
++	if (!cdev)
+ 		return;
+-	cdev = sch->dev.driver_data;
+ 	if (cdev->private->state != DEV_STATE_DISCONNECTED)
+ 		return;
+ 
+@@ -1028,7 +1087,7 @@ device_trigger_reprobe(struct subchannel *sch)
+ 	sch->schib.pmcw.ena = 0;
+ 	if ((sch->lpm & (sch->lpm - 1)) != 0)
+ 		sch->schib.pmcw.mp = 1;
+-	sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
++	sch->schib.pmcw.intparm = (u32)(addr_t)sch;
+ 	/* We should also udate ssd info, but this has to wait. */
+ 	/* Check if this is another device which appeared on the same sch. */
+ 	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
+@@ -1223,21 +1282,4 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
+ 	},
+ };
+ 
+-/*
+- * io_subchannel_irq is called for "real" interrupts or for status
+- * pending conditions on msch.
+- */
+-void
+-io_subchannel_irq (struct device *pdev)
+-{
+-	struct ccw_device *cdev;
 -
--	down(&power_supply_class->sem);
--	list_for_each_entry(dev, &power_supply_class->devices, node) {
--		struct power_supply *epsy = dev_get_drvdata(dev);
--		int i;
+-	cdev = to_subchannel(pdev)->dev.driver_data;
 -
--		for (i = 0; i < epsy->num_supplicants; i++) {
--			if (!strcmp(epsy->supplied_to[i], psy->name)) {
--				if (epsy->get_property(epsy,
--					  POWER_SUPPLY_PROP_ONLINE, &ret))
--					continue;
--				if (ret.intval)
--					goto out;
--			}
-+	struct power_supply *psy = (struct power_supply *)data;
-+	struct power_supply *epsy = dev_get_drvdata(dev);
-+	int i;
-+
-+	for (i = 0; i < epsy->num_supplicants; i++) {
-+		if (!strcmp(epsy->supplied_to[i], psy->name)) {
-+			if (epsy->get_property(epsy,
-+				  POWER_SUPPLY_PROP_ONLINE, &ret))
-+				continue;
-+			if (ret.intval)
-+				return ret.intval;
- 		}
+-	CIO_TRACE_EVENT (3, "IRQ");
+-	CIO_TRACE_EVENT (3, pdev->bus_id);
+-	if (cdev)
+-		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
+-}
+-
+ EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
+diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
+index 156f3f9..918b8b8 100644
+--- a/drivers/s390/cio/device_id.c
++++ b/drivers/s390/cio/device_id.c
+@@ -24,6 +24,7 @@
+ #include "css.h"
+ #include "device.h"
+ #include "ioasm.h"
++#include "io_sch.h"
+ 
+ /*
+  * Input :
+@@ -219,11 +220,13 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
+ 		return -EAGAIN;
  	}
--out:
--	up(&power_supply_class->sem);
-+	return 0;
-+}
-+
-+int power_supply_am_i_supplied(struct power_supply *psy)
-+{
-+	int error;
+ 	if (irb->scsw.cc == 3) {
+-		if ((sch->orb.lpm &
+-		     sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0)
++		u8 lpm;
++
++		lpm = to_io_private(sch)->orb.lpm;
++		if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0)
+ 			CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x "
+ 				      "on subchannel 0.%x.%04x is "
+-				      "'not operational'\n", sch->orb.lpm,
++				      "'not operational'\n", lpm,
+ 				      cdev->private->dev_id.devno,
+ 				      sch->schid.ssid, sch->schid.sch_no);
+ 		return -EACCES;
+diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
+index 7fd2dad..49b58eb 100644
+--- a/drivers/s390/cio/device_ops.c
++++ b/drivers/s390/cio/device_ops.c
+@@ -501,7 +501,7 @@ ccw_device_stlck(struct ccw_device *cdev)
+ 		return -ENOMEM;
+ 	}
+ 	spin_lock_irqsave(sch->lock, flags);
+-	ret = cio_enable_subchannel(sch, 3);
++	ret = cio_enable_subchannel(sch, 3, (u32)(addr_t)sch);
+ 	if (ret)
+ 		goto out_unlock;
+ 	/*
+diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
+index cb1879a..c52449a 100644
+--- a/drivers/s390/cio/device_pgid.c
++++ b/drivers/s390/cio/device_pgid.c
+@@ -22,6 +22,7 @@
+ #include "css.h"
+ #include "device.h"
+ #include "ioasm.h"
++#include "io_sch.h"
+ 
+ /*
+  * Helper function called from interrupt context to decide whether an
+@@ -155,10 +156,13 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
+ 		return -EAGAIN;
+ 	}
+ 	if (irb->scsw.cc == 3) {
++		u8 lpm;
 +
-+	error = class_for_each_device(power_supply_class, psy,
-+				      __power_supply_am_i_supplied);
++		lpm = to_io_private(sch)->orb.lpm;
+ 		CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x,"
+ 			      " lpm %02X, became 'not operational'\n",
+ 			      cdev->private->dev_id.devno, sch->schid.ssid,
+-			      sch->schid.sch_no, sch->orb.lpm);
++			      sch->schid.sch_no, lpm);
+ 		return -EACCES;
+ 	}
+ 	i = 8 - ffs(cdev->private->imask);
+diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
+index aa96e67..ebe0848 100644
+--- a/drivers/s390/cio/device_status.c
++++ b/drivers/s390/cio/device_status.c
+@@ -20,6 +20,7 @@
+ #include "css.h"
+ #include "device.h"
+ #include "ioasm.h"
++#include "io_sch.h"
  
--	dev_dbg(psy->dev, "%s %d\n", __FUNCTION__, ret.intval);
-+	dev_dbg(psy->dev, "%s %d\n", __FUNCTION__, error);
+ /*
+  * Check for any kind of channel or interface control check but don't
+@@ -310,6 +311,7 @@ int
+ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
+ {
+ 	struct subchannel *sch;
++	struct ccw1 *sense_ccw;
  
--	return ret.intval;
-+	return error;
- }
+ 	sch = to_subchannel(cdev->dev.parent);
  
- int power_supply_register(struct device *parent, struct power_supply *psy)
-diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
-index f1e00ff..7e3ad4f 100644
---- a/drivers/rtc/interface.c
-+++ b/drivers/rtc/interface.c
-@@ -251,20 +251,23 @@ void rtc_update_irq(struct rtc_device *rtc,
+@@ -326,15 +328,16 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
+ 	/*
+ 	 * We have ending status but no sense information. Do a basic sense.
+ 	 */
+-	sch->sense_ccw.cmd_code = CCW_CMD_BASIC_SENSE;
+-	sch->sense_ccw.cda = (__u32) __pa(cdev->private->irb.ecw);
+-	sch->sense_ccw.count = SENSE_MAX_COUNT;
+-	sch->sense_ccw.flags = CCW_FLAG_SLI;
++	sense_ccw = &to_io_private(sch)->sense_ccw;
++	sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE;
++	sense_ccw->cda = (__u32) __pa(cdev->private->irb.ecw);
++	sense_ccw->count = SENSE_MAX_COUNT;
++	sense_ccw->flags = CCW_FLAG_SLI;
+ 
+ 	/* Reset internal retry indication. */
+ 	cdev->private->flags.intretry = 0;
+ 
+-	return cio_start (sch, &sch->sense_ccw, 0xff);
++	return cio_start(sch, sense_ccw, 0xff);
  }
- EXPORT_SYMBOL_GPL(rtc_update_irq);
  
-+static int __rtc_match(struct device *dev, void *data)
+ /*
+diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
+new file mode 100644
+index 0000000..8c61316
+--- /dev/null
++++ b/drivers/s390/cio/io_sch.h
+@@ -0,0 +1,163 @@
++#ifndef S390_IO_SCH_H
++#define S390_IO_SCH_H
++
++#include "schid.h"
++
++/*
++ * operation request block
++ */
++struct orb {
++	u32 intparm;	/* interruption parameter */
++	u32 key  : 4;	/* flags, like key, suspend control, etc. */
++	u32 spnd : 1;	/* suspend control */
++	u32 res1 : 1;	/* reserved */
++	u32 mod  : 1;	/* modification control */
++	u32 sync : 1;	/* synchronize control */
++	u32 fmt  : 1;	/* format control */
++	u32 pfch : 1;	/* prefetch control */
++	u32 isic : 1;	/* initial-status-interruption control */
++	u32 alcc : 1;	/* address-limit-checking control */
++	u32 ssic : 1;	/* suppress-suspended-interr. control */
++	u32 res2 : 1;	/* reserved */
++	u32 c64  : 1;	/* IDAW/QDIO 64 bit control  */
++	u32 i2k  : 1;	/* IDAW 2/4kB block size control */
++	u32 lpm  : 8;	/* logical path mask */
++	u32 ils  : 1;	/* incorrect length */
++	u32 zero : 6;	/* reserved zeros */
++	u32 orbx : 1;	/* ORB extension control */
++	u32 cpa;	/* channel program address */
++}  __attribute__ ((packed, aligned(4)));
++
++struct io_subchannel_private {
++	struct orb orb;		/* operation request block */
++	struct ccw1 sense_ccw;	/* static ccw for sense command */
++} __attribute__ ((aligned(8)));
++
++#define to_io_private(n) ((struct io_subchannel_private *)n->private)
++#define sch_get_cdev(n) (dev_get_drvdata(&n->dev))
++#define sch_set_cdev(n, c) (dev_set_drvdata(&n->dev, c))
++
++#define MAX_CIWS 8
++
++/*
++ * sense-id response buffer layout
++ */
++struct senseid {
++	/* common part */
++	u8  reserved;	/* always 0x'FF' */
++	u16 cu_type;	/* control unit type */
++	u8  cu_model;	/* control unit model */
++	u16 dev_type;	/* device type */
++	u8  dev_model;	/* device model */
++	u8  unused;	/* padding byte */
++	/* extended part */
++	struct ciw ciw[MAX_CIWS];	/* variable # of CIWs */
++}  __attribute__ ((packed, aligned(4)));
++
++struct ccw_device_private {
++	struct ccw_device *cdev;
++	struct subchannel *sch;
++	int state;		/* device state */
++	atomic_t onoff;
++	unsigned long registered;
++	struct ccw_dev_id dev_id;	/* device id */
++	struct subchannel_id schid;	/* subchannel number */
++	u8 imask;		/* lpm mask for SNID/SID/SPGID */
++	int iretry;		/* retry counter SNID/SID/SPGID */
++	struct {
++		unsigned int fast:1;	/* post with "channel end" */
++		unsigned int repall:1;	/* report every interrupt status */
++		unsigned int pgroup:1;	/* do path grouping */
++		unsigned int force:1;	/* allow forced online */
++	} __attribute__ ((packed)) options;
++	struct {
++		unsigned int pgid_single:1; /* use single path for Set PGID */
++		unsigned int esid:1;	    /* Ext. SenseID supported by HW */
++		unsigned int dosense:1;	    /* delayed SENSE required */
++		unsigned int doverify:1;    /* delayed path verification */
++		unsigned int donotify:1;    /* call notify function */
++		unsigned int recog_done:1;  /* dev. recog. complete */
++		unsigned int fake_irb:1;    /* deliver faked irb */
++		unsigned int intretry:1;    /* retry internal operation */
++	} __attribute__((packed)) flags;
++	unsigned long intparm;	/* user interruption parameter */
++	struct qdio_irq *qdio_data;
++	struct irb irb;		/* device status */
++	struct senseid senseid;	/* SenseID info */
++	struct pgid pgid[8];	/* path group IDs per chpid*/
++	struct ccw1 iccws[2];	/* ccws for SNID/SID/SPGID commands */
++	struct work_struct kick_work;
++	wait_queue_head_t wait_q;
++	struct timer_list timer;
++	void *cmb;			/* measurement information */
++	struct list_head cmb_list;	/* list of measured devices */
++	u64 cmb_start_time;		/* clock value of cmb reset */
++	void *cmb_wait;			/* deferred cmb enable/disable */
++};
++
++static inline int ssch(struct subchannel_id schid, volatile struct orb *addr)
 +{
-+	char *name = (char *)data;
++	register struct subchannel_id reg1 asm("1") = schid;
++	int ccode;
 +
-+	if (strncmp(dev->bus_id, name, BUS_ID_SIZE) == 0)
-+		return 1;
-+	return 0;
++	asm volatile(
++		"	ssch	0(%2)\n"
++		"	ipm	%0\n"
++		"	srl	%0,28"
++		: "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
++	return ccode;
 +}
 +
- struct rtc_device *rtc_class_open(char *name)
- {
- 	struct device *dev;
- 	struct rtc_device *rtc = NULL;
++static inline int rsch(struct subchannel_id schid)
++{
++	register struct subchannel_id reg1 asm("1") = schid;
++	int ccode;
++
++	asm volatile(
++		"	rsch\n"
++		"	ipm	%0\n"
++		"	srl	%0,28"
++		: "=d" (ccode) : "d" (reg1) : "cc");
++	return ccode;
++}
++
++static inline int csch(struct subchannel_id schid)
++{
++	register struct subchannel_id reg1 asm("1") = schid;
++	int ccode;
++
++	asm volatile(
++		"	csch\n"
++		"	ipm	%0\n"
++		"	srl	%0,28"
++		: "=d" (ccode) : "d" (reg1) : "cc");
++	return ccode;
++}
++
++static inline int hsch(struct subchannel_id schid)
++{
++	register struct subchannel_id reg1 asm("1") = schid;
++	int ccode;
++
++	asm volatile(
++		"	hsch\n"
++		"	ipm	%0\n"
++		"	srl	%0,28"
++		: "=d" (ccode) : "d" (reg1) : "cc");
++	return ccode;
++}
++
++static inline int xsch(struct subchannel_id schid)
++{
++	register struct subchannel_id reg1 asm("1") = schid;
++	int ccode;
++
++	asm volatile(
++		"	.insn	rre,0xb2760000,%1,0\n"
++		"	ipm	%0\n"
++		"	srl	%0,28"
++		: "=d" (ccode) : "d" (reg1) : "cc");
++	return ccode;
++}
++
++#endif
+diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
+index 7153dd9..652ea36 100644
+--- a/drivers/s390/cio/ioasm.h
++++ b/drivers/s390/cio/ioasm.h
+@@ -109,72 +109,6 @@ static inline int tpi( volatile struct tpi_info *addr)
+ 	return ccode;
+ }
+ 
+-static inline int ssch(struct subchannel_id schid,
+-			   volatile struct orb *addr)
+-{
+-	register struct subchannel_id reg1 asm ("1") = schid;
+-	int ccode;
+-
+-	asm volatile(
+-		"	ssch	0(%2)\n"
+-		"	ipm	%0\n"
+-		"	srl	%0,28"
+-		: "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
+-	return ccode;
+-}
+-
+-static inline int rsch(struct subchannel_id schid)
+-{
+-	register struct subchannel_id reg1 asm ("1") = schid;
+-	int ccode;
+-
+-	asm volatile(
+-		"	rsch\n"
+-		"	ipm	%0\n"
+-		"	srl	%0,28"
+-		: "=d" (ccode) : "d" (reg1) : "cc");
+-	return ccode;
+-}
+-
+-static inline int csch(struct subchannel_id schid)
+-{
+-	register struct subchannel_id reg1 asm ("1") = schid;
+-	int ccode;
+-
+-	asm volatile(
+-		"	csch\n"
+-		"	ipm	%0\n"
+-		"	srl	%0,28"
+-		: "=d" (ccode) : "d" (reg1) : "cc");
+-	return ccode;
+-}
+-
+-static inline int hsch(struct subchannel_id schid)
+-{
+-	register struct subchannel_id reg1 asm ("1") = schid;
+-	int ccode;
+-
+-	asm volatile(
+-		"	hsch\n"
+-		"	ipm	%0\n"
+-		"	srl	%0,28"
+-		: "=d" (ccode) : "d" (reg1) : "cc");
+-	return ccode;
+-}
+-
+-static inline int xsch(struct subchannel_id schid)
+-{
+-	register struct subchannel_id reg1 asm ("1") = schid;
+-	int ccode;
+-
+-	asm volatile(
+-		"	.insn	rre,0xb2760000,%1,0\n"
+-		"	ipm	%0\n"
+-		"	srl	%0,28"
+-		: "=d" (ccode) : "d" (reg1) : "cc");
+-	return ccode;
+-}
+-
+ static inline int chsc(void *chsc_area)
+ {
+ 	typedef struct { char _[4096]; } addr_type;
+diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
+index 40a3208..e2a781b 100644
+--- a/drivers/s390/cio/qdio.c
++++ b/drivers/s390/cio/qdio.c
+@@ -48,11 +48,11 @@
+ #include <asm/debug.h>
+ #include <asm/s390_rdev.h>
+ #include <asm/qdio.h>
++#include <asm/airq.h>
+ 
+ #include "cio.h"
+ #include "css.h"
+ #include "device.h"
+-#include "airq.h"
+ #include "qdio.h"
+ #include "ioasm.h"
+ #include "chsc.h"
+@@ -96,7 +96,7 @@ static debug_info_t *qdio_dbf_slsb_in;
+ static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change
+ 						 during a while loop */
+ static DEFINE_SPINLOCK(ttiq_list_lock);
+-static int register_thinint_result;
++static void *tiqdio_ind;
+ static void tiqdio_tl(unsigned long);
+ static DECLARE_TASKLET(tiqdio_tasklet,tiqdio_tl,0);
  
--	down(&rtc_class->sem);
--	list_for_each_entry(dev, &rtc_class->devices, node) {
--		if (strncmp(dev->bus_id, name, BUS_ID_SIZE) == 0) {
--			dev = get_device(dev);
--			if (dev)
--				rtc = to_rtc_device(dev);
--			break;
--		}
--	}
-+	dev = class_find_device(rtc_class, name, __rtc_match);
-+	if (dev)
-+		rtc = to_rtc_device(dev);
+@@ -399,7 +399,7 @@ qdio_get_indicator(void)
+ {
+ 	int i;
  
- 	if (rtc) {
- 		if (!try_module_get(rtc->owner)) {
-@@ -272,7 +275,6 @@ struct rtc_device *rtc_class_open(char *name)
- 			rtc = NULL;
+-	for (i=1;i<INDICATORS_PER_CACHELINE;i++)
++	for (i = 0; i < INDICATORS_PER_CACHELINE; i++)
+ 		if (!indicator_used[i]) {
+ 			indicator_used[i]=1;
+ 			return indicators+i;
+@@ -1408,8 +1408,7 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
+ 	if (q->hydra_gives_outbound_pcis) {
+ 		if (!q->siga_sync_done_on_thinints) {
+ 			SYNC_MEMORY_ALL;
+-		} else if ((!q->siga_sync_done_on_outb_tis)&&
+-			 (q->hydra_gives_outbound_pcis)) {
++		} else if (!q->siga_sync_done_on_outb_tis) {
+ 			SYNC_MEMORY_ALL_OUTB;
  		}
+ 	} else {
+@@ -1911,8 +1910,7 @@ qdio_fill_thresholds(struct qdio_irq *irq_ptr,
  	}
--	up(&rtc_class->sem);
+ }
  
- 	return rtc;
+-static int
+-tiqdio_thinint_handler(void)
++static void tiqdio_thinint_handler(void *ind, void *drv_data)
+ {
+ 	QDIO_DBF_TEXT4(0,trace,"thin_int");
+ 
+@@ -1925,7 +1923,6 @@ tiqdio_thinint_handler(void)
+ 		tiqdio_clear_global_summary();
+ 
+ 	tiqdio_inbound_checks();
+-	return 0;
  }
-diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
-index 5322e5e..9dc77f1 100644
---- a/drivers/s390/char/sclp_config.c
-+++ b/drivers/s390/char/sclp_config.c
-@@ -29,12 +29,12 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
- 	struct sys_device *sysdev;
  
- 	printk(KERN_WARNING TAG "cpu capability changed.\n");
--	lock_cpu_hotplug();
-+	get_online_cpus();
- 	for_each_online_cpu(cpu) {
- 		sysdev = get_cpu_sysdev(cpu);
- 		kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
+ static void
+@@ -2445,7 +2442,7 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
+ 		real_addr_dev_st_chg_ind=0;
+ 	} else {
+ 		real_addr_local_summary_bit=
+-			virt_to_phys((volatile void *)indicators);
++			virt_to_phys((volatile void *)tiqdio_ind);
+ 		real_addr_dev_st_chg_ind=
+ 			virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind);
+ 	}
+@@ -3740,23 +3737,25 @@ static void
+ tiqdio_register_thinints(void)
+ {
+ 	char dbf_text[20];
+-	register_thinint_result=
+-		s390_register_adapter_interrupt(&tiqdio_thinint_handler);
+-	if (register_thinint_result) {
+-		sprintf(dbf_text,"regthn%x",(register_thinint_result&0xff));
++
++	tiqdio_ind =
++		s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL);
++	if (IS_ERR(tiqdio_ind)) {
++		sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_ind));
+ 		QDIO_DBF_TEXT0(0,setup,dbf_text);
+ 		QDIO_PRINT_ERR("failed to register adapter handler " \
+-			       "(rc=%i).\nAdapter interrupts might " \
++			       "(rc=%li).\nAdapter interrupts might " \
+ 			       "not work. Continuing.\n",
+-			       register_thinint_result);
++			       PTR_ERR(tiqdio_ind));
++		tiqdio_ind = NULL;
  	}
--	unlock_cpu_hotplug();
-+	put_online_cpus();
  }
  
- static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
+ static void
+ tiqdio_unregister_thinints(void)
+ {
+-	if (!register_thinint_result)
+-		s390_unregister_adapter_interrupt(&tiqdio_thinint_handler);
++	if (tiqdio_ind)
++		s390_unregister_adapter_interrupt(tiqdio_ind);
+ }
+ 
+ static int
+@@ -3768,8 +3767,8 @@ qdio_get_qdio_memory(void)
+ 	for (i=1;i<INDICATORS_PER_CACHELINE;i++)
+ 		indicator_used[i]=0;
+ 	indicators = kzalloc(sizeof(__u32)*(INDICATORS_PER_CACHELINE),
+-				   GFP_KERNEL);
+-       	if (!indicators)
++			     GFP_KERNEL);
++	if (!indicators)
+ 		return -ENOMEM;
+ 	return 0;
+ }
+@@ -3780,7 +3779,6 @@ qdio_release_qdio_memory(void)
+ 	kfree(indicators);
+ }
+ 
+-
+ static void
+ qdio_unregister_dbf_views(void)
+ {
+diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
+index 6d7aad1..37870e4 100644
+--- a/drivers/s390/cio/qdio.h
++++ b/drivers/s390/cio/qdio.h
+@@ -57,7 +57,7 @@
+ 					    of the queue to 0 */
+ 
+ #define QDIO_ESTABLISH_TIMEOUT (1*HZ)
+-#define QDIO_ACTIVATE_TIMEOUT ((5*HZ)>>10)
++#define QDIO_ACTIVATE_TIMEOUT (5*HZ)
+ #define QDIO_CLEANUP_CLEAR_TIMEOUT (20*HZ)
+ #define QDIO_CLEANUP_HALT_TIMEOUT (10*HZ)
+ #define QDIO_FORCE_CHECK_TIMEOUT (10*HZ)
+diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
+index 3561982..c307621 100644
+--- a/drivers/s390/net/claw.c
++++ b/drivers/s390/net/claw.c
+@@ -2416,7 +2416,7 @@ init_ccw_bk(struct net_device *dev)
+ 		    privptr->p_buff_pages_perwrite);
+ #endif
+                    if (p_buff==NULL) {
+-                        printk(KERN_INFO "%s:%s __get_free_pages"
++			printk(KERN_INFO "%s:%s __get_free_pages "
+ 			 	"for writes buf failed : get is for %d pages\n",
+ 				dev->name,
+ 				__FUNCTION__,
+diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
+index 0fd663b..7bfe8d7 100644
+--- a/drivers/s390/net/lcs.c
++++ b/drivers/s390/net/lcs.c
+@@ -1115,7 +1115,7 @@ list_modified:
+ 			rc = lcs_send_setipm(card, ipm);
+ 			spin_lock_irqsave(&card->ipm_lock, flags);
+ 			if (rc) {
+-				PRINT_INFO("Adding multicast address failed."
++				PRINT_INFO("Adding multicast address failed. "
+ 					   "Table possibly full!\n");
+ 				/* store ipm in failed list -> will be added
+ 				 * to ipm_list again, so a retry will be done
 diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
-index c7ea938..d6e93f1 100644
+index c7ea938..f3d893c 100644
 --- a/drivers/s390/net/netiucv.c
 +++ b/drivers/s390/net/netiucv.c
-@@ -2089,6 +2089,11 @@ static struct attribute_group netiucv_drv_attr_group = {
+@@ -198,8 +198,7 @@ struct iucv_connection {
+ /**
+  * Linked list of all connection structs.
+  */
+-static struct list_head iucv_connection_list =
+-	LIST_HEAD_INIT(iucv_connection_list);
++static LIST_HEAD(iucv_connection_list);
+ static DEFINE_RWLOCK(iucv_connection_rwlock);
+ 
+ /**
+@@ -2089,6 +2088,11 @@ static struct attribute_group netiucv_drv_attr_group = {
  	.attrs = netiucv_drv_attrs,
  };
  
@@ -124030,7 +142864,7 @@
  static void netiucv_banner(void)
  {
  	PRINT_INFO("NETIUCV driver initialized\n");
-@@ -2113,7 +2118,6 @@ static void __exit netiucv_exit(void)
+@@ -2113,7 +2117,6 @@ static void __exit netiucv_exit(void)
  		netiucv_unregister_device(dev);
  	}
  
@@ -124038,7 +142872,7 @@
  	driver_unregister(&netiucv_driver);
  	iucv_unregister(&netiucv_handler, 1);
  	iucv_unregister_dbf_views();
-@@ -2133,6 +2137,7 @@ static int __init netiucv_init(void)
+@@ -2133,6 +2136,7 @@ static int __init netiucv_init(void)
  	if (rc)
  		goto out_dbf;
  	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
@@ -124046,7 +142880,7 @@
  	rc = driver_register(&netiucv_driver);
  	if (rc) {
  		PRINT_ERR("NETIUCV: failed to register driver.\n");
-@@ -2140,18 +2145,9 @@ static int __init netiucv_init(void)
+@@ -2140,18 +2144,9 @@ static int __init netiucv_init(void)
  		goto out_iucv;
  	}
  
@@ -124065,6 +142899,41 @@
  out_iucv:
  	iucv_unregister(&netiucv_handler, 1);
  out_dbf:
+diff --git a/drivers/s390/net/qeth_proc.c b/drivers/s390/net/qeth_proc.c
+index f1ff165..46ecd03 100644
+--- a/drivers/s390/net/qeth_proc.c
++++ b/drivers/s390/net/qeth_proc.c
+@@ -146,7 +146,7 @@ qeth_procfile_seq_show(struct seq_file *s, void *it)
+ 	return 0;
+ }
+ 
+-static struct seq_operations qeth_procfile_seq_ops = {
++static const struct seq_operations qeth_procfile_seq_ops = {
+ 	.start = qeth_procfile_seq_start,
+ 	.stop  = qeth_procfile_seq_stop,
+ 	.next  = qeth_procfile_seq_next,
+@@ -264,7 +264,7 @@ qeth_perf_procfile_seq_show(struct seq_file *s, void *it)
+ 	return 0;
+ }
+ 
+-static struct seq_operations qeth_perf_procfile_seq_ops = {
++static const struct seq_operations qeth_perf_procfile_seq_ops = {
+ 	.start = qeth_procfile_seq_start,
+ 	.stop  = qeth_procfile_seq_stop,
+ 	.next  = qeth_procfile_seq_next,
+diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
+index 47bb47b..8735a41 100644
+--- a/drivers/s390/net/smsgiucv.c
++++ b/drivers/s390/net/smsgiucv.c
+@@ -42,7 +42,7 @@ MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver");
+ static struct iucv_path *smsg_path;
+ 
+ static DEFINE_SPINLOCK(smsg_list_lock);
+-static struct list_head smsg_list = LIST_HEAD_INIT(smsg_list);
++static LIST_HEAD(smsg_list);
+ 
+ static int smsg_path_pending(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]);
+ static void smsg_message_pending(struct iucv_path *, struct iucv_message *);
 diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
 index 0011849..874b55e 100644
 --- a/drivers/s390/scsi/zfcp_aux.c
@@ -124192,7 +143061,7 @@
   *  functions needed for reference/usage counting
   */
 diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
-index 07fa824..4f86c0e 100644
+index 07fa824..2dc8110 100644
 --- a/drivers/s390/scsi/zfcp_erp.c
 +++ b/drivers/s390/scsi/zfcp_erp.c
 @@ -131,7 +131,7 @@ static void zfcp_close_qdio(struct zfcp_adapter *adapter)
@@ -124223,6 +143092,15 @@
  			/* fsf_req still exists */
  			debug_text_event(adapter->erp_dbf, 3, "a_ca_req");
  			debug_event(adapter->erp_dbf, 3, &erp_action->fsf_req,
+@@ -1285,7 +1286,7 @@ zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
+ 	 * note: no lock in subsequent strategy routines
+ 	 * (this allows these routine to call schedule, e.g.
+ 	 * kmalloc with such flags or qdio_initialize & friends)
+-	 * Note: in case of timeout, the seperate strategies will fail
++	 * Note: in case of timeout, the separate strategies will fail
+ 	 * anyhow. No need for a special action. Even worse, a nameserver
+ 	 * failure would not wake up waiting ports without the call.
+ 	 */
 @@ -1609,7 +1610,6 @@ static void zfcp_erp_scsi_scan(struct work_struct *work)
  	scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
  			 unit->scsi_lun, 0);
@@ -124273,9 +143151,27 @@
  extern void zfcp_sysfs_adapter_remove_files(struct device *);
  extern int  zfcp_sysfs_port_create_files(struct device *, u32);
 diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
-index ff866eb..fe57941 100644
+index ff866eb..e45f85f 100644
 --- a/drivers/s390/scsi/zfcp_fsf.c
 +++ b/drivers/s390/scsi/zfcp_fsf.c
+@@ -502,7 +502,7 @@ zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *fsf_req)
+ 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ 		break;
+ 	case FSF_SQ_NO_RECOM:
+-		ZFCP_LOG_NORMAL("bug: No recommendation could be given for a"
++		ZFCP_LOG_NORMAL("bug: No recommendation could be given for a "
+ 				"problem on the adapter %s "
+ 				"Stopping all operations on this adapter. ",
+ 				zfcp_get_busid_by_adapter(fsf_req->adapter));
+@@ -813,7 +813,7 @@ zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *fsf_req)
+ 	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+ 
+ 	if (!port || (port->d_id != (status_buffer->d_id & ZFCP_DID_MASK))) {
+-		ZFCP_LOG_NORMAL("bug: Reopen port indication received for"
++		ZFCP_LOG_NORMAL("bug: Reopen port indication received for "
+ 				"nonexisting port with d_id 0x%06x on "
+ 				"adapter %s. Ignored.\n",
+ 				status_buffer->d_id & ZFCP_DID_MASK,
 @@ -1116,6 +1116,10 @@ zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
  		goto out;
  	}
@@ -124365,6 +143261,24 @@
   failed_send:
  	zfcp_fsf_req_free(fsf_req);
  
+@@ -2280,7 +2281,7 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
+ 				     &lock_flags, &fsf_req);
+ 	if (retval) {
+ 		ZFCP_LOG_INFO("error: Out of resources. Could not create an "
+-			      "exchange port data request for"
++			      "exchange port data request for "
+ 			      "the adapter %s.\n",
+ 			      zfcp_get_busid_by_adapter(adapter));
+ 		write_unlock_irqrestore(&adapter->request_queue.queue_lock,
+@@ -2339,7 +2340,7 @@ zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
+ 				0, NULL, &lock_flags, &fsf_req);
+ 	if (retval) {
+ 		ZFCP_LOG_INFO("error: Out of resources. Could not create an "
+-			      "exchange port data request for"
++			      "exchange port data request for "
+ 			      "the adapter %s.\n",
+ 			      zfcp_get_busid_by_adapter(adapter));
+ 		write_unlock_irqrestore(&adapter->request_queue.queue_lock,
 @@ -3592,6 +3593,12 @@ zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
  		goto failed_req_create;
  	}
@@ -124428,6 +143342,28 @@
   out:
  	write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
  	return fsf_req;
+@@ -4725,7 +4725,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
+ 	/* allocate new FSF request */
+ 	fsf_req = zfcp_fsf_req_alloc(pool, req_flags);
+ 	if (unlikely(NULL == fsf_req)) {
+-		ZFCP_LOG_DEBUG("error: Could not put an FSF request into"
++		ZFCP_LOG_DEBUG("error: Could not put an FSF request into "
+ 			       "the outbound (send) queue.\n");
+ 		ret = -ENOMEM;
+ 		goto failed_fsf_req;
+diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
+index 51d92b1..22fdc17 100644
+--- a/drivers/s390/scsi/zfcp_qdio.c
++++ b/drivers/s390/scsi/zfcp_qdio.c
+@@ -529,7 +529,7 @@ zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *fsf_req)
+ 
+ 
+ /**
+- * zfcp_qdio_sbale_fill - set address and lenght in current SBALE
++ * zfcp_qdio_sbale_fill - set address and length in current SBALE
+  *	on request_queue
+  */
+ static void
 diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
 index abae202..b9daf5c 100644
 --- a/drivers/s390/scsi/zfcp_scsi.c
@@ -157808,7 +176744,7 @@
  	  Watchdog timer embedded into AT32AP700x devices. This will reboot
  	  your system when the timeout is reached.
 diff --git a/fs/Kconfig b/fs/Kconfig
-index 781b47d..b6df18f 100644
+index 781b47d..9656139 100644
 --- a/fs/Kconfig
 +++ b/fs/Kconfig
 @@ -440,14 +440,8 @@ config OCFS2_FS
@@ -157839,7 +176775,77 @@
  	help
  	  configfs is a ram-based filesystem that provides the converse
  	  of sysfs's functionality. Where sysfs is a filesystem-based
-@@ -2130,4 +2124,3 @@ source "fs/nls/Kconfig"
+@@ -1905,13 +1899,15 @@ config CIFS
+ 	  file servers such as Windows 2000 (including Windows 2003, NT 4  
+ 	  and Windows XP) as well by Samba (which provides excellent CIFS
+ 	  server support for Linux and many other operating systems). Limited
+-	  support for OS/2 and Windows ME and similar servers is provided as well.
+-
+-	  The intent of the cifs module is to provide an advanced
+-	  network file system client for mounting to CIFS compliant servers,
+-	  including support for dfs (hierarchical name space), secure per-user
+-	  session establishment, safe distributed caching (oplock), optional
+-	  packet signing, Unicode and other internationalization improvements. 
++	  support for OS/2 and Windows ME and similar servers is provided as
++	  well.
++
++	  The cifs module provides an advanced network file system
++	  client for mounting to CIFS compliant servers.  It includes
++	  support for DFS (hierarchical name space), secure per-user
++	  session establishment via Kerberos or NTLM or NTLMv2,
++	  safe distributed caching (oplock), optional packet
++	  signing, Unicode and other internationalization improvements.
+ 	  If you need to mount to Samba or Windows from this machine, say Y.
+ 
+ config CIFS_STATS
+@@ -1943,7 +1939,8 @@ config CIFS_WEAK_PW_HASH
+ 	  (since 1997) support stronger NTLM (and even NTLMv2 and Kerberos)
+ 	  security mechanisms. These hash the password more securely
+ 	  than the mechanisms used in the older LANMAN version of the
+-          SMB protocol needed to establish sessions with old SMB servers.
++	  SMB protocol but LANMAN based authentication is needed to
++	  establish sessions with some old SMB servers.
+ 
+ 	  Enabling this option allows the cifs module to mount to older
+ 	  LANMAN based servers such as OS/2 and Windows 95, but such
+@@ -1951,8 +1948,8 @@ config CIFS_WEAK_PW_HASH
+ 	  security mechanisms if you are on a public network.  Unless you
+ 	  have a need to access old SMB servers (and are on a private 
+ 	  network) you probably want to say N.  Even if this support
+-	  is enabled in the kernel build, they will not be used
+-	  automatically. At runtime LANMAN mounts are disabled but
++	  is enabled in the kernel build, LANMAN authentication will not be
++	  used automatically. At runtime LANMAN mounts are disabled but
+ 	  can be set to required (or optional) either in
+ 	  /proc/fs/cifs (see fs/cifs/README for more detail) or via an
+ 	  option on the mount command. This support is disabled by 
+@@ -2018,12 +2015,22 @@ config CIFS_UPCALL
+ 	  depends on CIFS_EXPERIMENTAL
+ 	  depends on KEYS
+ 	  help
+-	    Enables an upcall mechanism for CIFS which will be used to contact
+-	    userspace helper utilities to provide SPNEGO packaged Kerberos
+-	    tickets which are needed to mount to certain secure servers
++	    Enables an upcall mechanism for CIFS which accesses
++	    userspace helper utilities to provide SPNEGO packaged (RFC 4178)
++	    Kerberos tickets which are needed to mount to certain secure servers
+ 	    (for which more secure Kerberos authentication is required). If
+ 	    unsure, say N.
+ 
++config CIFS_DFS_UPCALL
++	  bool "DFS feature support (EXPERIMENTAL)"
++	  depends on CIFS_EXPERIMENTAL
++	  depends on KEYS
++	  help
++	    Enables an upcall mechanism for CIFS which contacts userspace
++	    helper utilities to provide server name resolution (host names to
++	    IP addresses) which is needed for implicit mounts of DFS junction
++	    points. If unsure, say N.
++
+ config NCP_FS
+ 	tristate "NCP file system support (to mount NetWare volumes)"
+ 	depends on IPX!=n || INET
+@@ -2130,4 +2137,3 @@ source "fs/nls/Kconfig"
  source "fs/dlm/Kconfig"
  
  endmenu
@@ -157903,6 +176909,1681 @@
  	cdev->ops = fops;
  }
  
+diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
+index a609599..edd2483 100644
+--- a/fs/cifs/CHANGES
++++ b/fs/cifs/CHANGES
+@@ -3,7 +3,10 @@ Version 1.52
+ Fix oops on second mount to server when null auth is used.
+ Enable experimental Kerberos support.  Return writebehind errors on flush
+ and sync so that events like out of disk space get reported properly on
+-cached files.
++cached files. Fix setxattr failure to certain Samba versions. Fix mount
++of second share to disconnected server session (autoreconnect on this).
++Add ability to modify cifs acls for handling chmod (when mounted with
++cifsacl flag).
+ 
+ Version 1.51
+ ------------
+diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
+index 45e42fb..6ba43fb 100644
+--- a/fs/cifs/Makefile
++++ b/fs/cifs/Makefile
+@@ -9,3 +9,5 @@ cifs-y := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o \
+ 	  readdir.o ioctl.o sess.o export.o cifsacl.o
+ 
+ cifs-$(CONFIG_CIFS_UPCALL) += cifs_spnego.o
++
++cifs-$(CONFIG_CIFS_DFS_UPCALL) += dns_resolve.o cifs_dfs_ref.o
+diff --git a/fs/cifs/README b/fs/cifs/README
+index bf11329..c623e2f 100644
+--- a/fs/cifs/README
++++ b/fs/cifs/README
+@@ -56,7 +56,8 @@ the CIFS VFS web site) copy it to the same directory in which mount.smbfs and
+ similar files reside (usually /sbin).  Although the helper software is not  
+ required, mount.cifs is recommended.  Eventually the Samba 3.0 utility program 
+ "net" may also be helpful since it may someday provide easier mount syntax for
+-users who are used to Windows e.g.  net use <mount point> <UNC name or cifs URL>
++users who are used to Windows e.g.
++	net use <mount point> <UNC name or cifs URL>
+ Note that running the Winbind pam/nss module (logon service) on all of your
+ Linux clients is useful in mapping Uids and Gids consistently across the
+ domain to the proper network user.  The mount.cifs mount helper can be
+@@ -248,7 +249,7 @@ A partial list of the supported mount options follows:
+ 		the CIFS session.
+   password	The user password.  If the mount helper is
+ 		installed, the user will be prompted for password
+-		if it is not supplied.
++		if not supplied.
+   ip		The ip address of the target server
+   unc		The target server Universal Network Name (export) to 
+ 		mount.	
+@@ -283,7 +284,7 @@ A partial list of the supported mount options follows:
+ 		can be enabled by specifying file_mode and dir_mode on 
+ 		the client.  Note that the mount.cifs helper must be
+ 		at version 1.10 or higher to support specifying the uid
+-		(or gid) in non-numberic form.
++		(or gid) in non-numeric form.
+   gid		Set the default gid for inodes (similar to above).
+   file_mode     If CIFS Unix extensions are not supported by the server
+ 		this overrides the default mode for file inodes.
+@@ -417,9 +418,10 @@ A partial list of the supported mount options follows:
+   acl   	Allow setfacl and getfacl to manage posix ACLs if server
+ 		supports them.  (default)
+   noacl 	Do not allow setfacl and getfacl calls on this mount
+-  user_xattr    Allow getting and setting user xattrs as OS/2 EAs (extended
+-		attributes) to the server (default) e.g. via setfattr 
+-		and getfattr utilities. 
++  user_xattr    Allow getting and setting user xattrs (those attributes whose
++		name begins with "user." or "os2.") as OS/2 EAs (extended
++		attributes) to the server.  This allows support of the
++		setfattr and getfattr utilities. (default)
+   nouser_xattr  Do not allow getfattr/setfattr to get/set/list xattrs 
+   mapchars      Translate six of the seven reserved characters (not backslash)
+ 			*?<>|:
+@@ -434,6 +436,7 @@ A partial list of the supported mount options follows:
+  nomapchars     Do not translate any of these seven characters (default).
+  nocase         Request case insensitive path name matching (case
+ 		sensitive is the default if the server suports it).
++		(mount option "ignorecase" is identical to "nocase")
+  posixpaths     If CIFS Unix extensions are supported, attempt to
+ 		negotiate posix path name support which allows certain
+ 		characters forbidden in typical CIFS filenames, without
+@@ -485,6 +488,9 @@ A partial list of the supported mount options follows:
+ 			ntlmv2i Use NTLMv2 password hashing with packet signing
+ 			lanman  (if configured in kernel config) use older
+ 				lanman hash
++hard		Retry file operations if server is not responding
++soft		Limit retries to unresponsive servers (usually only
++		one retry) before returning an error.  (default)
+ 
+ The mount.cifs mount helper also accepts a few mount options before -o
+ including:
+@@ -535,8 +541,8 @@ SecurityFlags		Flags which control security negotiation and
+ 			must use NTLM					0x02002
+ 			may use NTLMv2					0x00004
+ 			must use NTLMv2					0x04004
+-			may use Kerberos security (not implemented yet) 0x00008
+-			must use Kerberos (not implemented yet)         0x08008
++			may use Kerberos security			0x00008
++			must use Kerberos				0x08008
+ 			may use lanman (weak) password hash  		0x00010
+ 			must use lanman password hash			0x10010
+ 			may use plaintext passwords    			0x00020
+@@ -626,6 +632,6 @@ returned success.
+ 	
+ Also note that "cat /proc/fs/cifs/DebugData" will display information about 
+ the active sessions and the shares that are mounted.
+-Enabling Kerberos (extended security) works when CONFIG_CIFS_EXPERIMENTAL is enabled
+-but requires a user space helper (from the Samba project). NTLM and NTLMv2 and
+-LANMAN support do not require this helpr.
++Enabling Kerberos (extended security) works when CONFIG_CIFS_EXPERIMENTAL is
++on but requires a user space helper (from the Samba project). NTLM and NTLMv2 and
++LANMAN support do not require this helper.
+diff --git a/fs/cifs/TODO b/fs/cifs/TODO
+index a8852c2..92c9fea 100644
+--- a/fs/cifs/TODO
++++ b/fs/cifs/TODO
+@@ -1,4 +1,4 @@
+-Version 1.49 April 26, 2007
++Version 1.52 January 3, 2008
+ 
+ A Partial List of Missing Features
+ ==================================
+@@ -16,16 +16,14 @@ SecurityDescriptors
+ c) Better pam/winbind integration (e.g. to handle uid mapping
+ better)
+ 
+-d) Verify that Kerberos signing works
+-
+-e) Cleanup now unneeded SessSetup code in
++d) Cleanup now unneeded SessSetup code in
+ fs/cifs/connect.c and add back in NTLMSSP code if any servers
+ need it
+ 
+-f) MD5-HMAC signing SMB PDUs when SPNEGO style SessionSetup 
+-used (Kerberos or NTLMSSP). Signing alreadyimplemented for NTLM
+-and raw NTLMSSP already. This is important when enabling
+-extended security and mounting to Windows 2003 Servers
++e) ms-dfs and ms-dfs host name resolution cleanup
++
++f) fix NTLMv2 signing when two mounts with different users to same
++server.
+ 
+ g) Directory entry caching relies on a 1 second timer, rather than 
+ using FindNotify or equivalent.  - (started)
+diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
+new file mode 100644
+index 0000000..413ee23
+--- /dev/null
++++ b/fs/cifs/cifs_dfs_ref.c
+@@ -0,0 +1,377 @@
++/*
++ *   Contains the CIFS DFS referral mounting routines used for handling
++ *   traversal via DFS junction point
++ *
++ *   Copyright (c) 2007 Igor Mammedov
++ *   Copyright (C) International Business Machines  Corp., 2008
++ *   Author(s): Igor Mammedov (niallain at gmail.com)
++ *		Steve French (sfrench at us.ibm.com)
++ *   This program is free software; you can redistribute it and/or
++ *   modify it under the terms of the GNU General Public License
++ *   as published by the Free Software Foundation; either version
++ *   2 of the License, or (at your option) any later version.
++ */
++
++#include <linux/dcache.h>
++#include <linux/mount.h>
++#include <linux/namei.h>
++#include <linux/vfs.h>
++#include <linux/fs.h>
++#include "cifsglob.h"
++#include "cifsproto.h"
++#include "cifsfs.h"
++#include "dns_resolve.h"
++#include "cifs_debug.h"
++
++LIST_HEAD(cifs_dfs_automount_list);
++
++/*
++ * DFS functions
++*/
++
++void dfs_shrink_umount_helper(struct vfsmount *vfsmnt)
++{
++	mark_mounts_for_expiry(&cifs_dfs_automount_list);
++	mark_mounts_for_expiry(&cifs_dfs_automount_list);
++	shrink_submounts(vfsmnt, &cifs_dfs_automount_list);
++}
++
++/**
++ * cifs_get_share_name	-	extracts share name from UNC
++ * @node_name:	pointer to UNC string
++ *
++ * Extracts the share name from a full UNC.
++ * i.e. strips from UNC trailing path that is not part of share
++ * name and fixes up a missing '\' at the beginning of a DFS node referral
++ * if necessary.
++ * Returns pointer to share name on success or NULL on error.
++ * Caller is responsible for freeing returned string.
++ */
++static char *cifs_get_share_name(const char *node_name)
++{
++	int len;
++	char *UNC;
++	char *pSep;
++
++	len = strlen(node_name);
++	UNC = kmalloc(len+2 /*for term null and additional \ if it's missed */,
++			 GFP_KERNEL);
++	if (!UNC)
++		return NULL;
++
++	/* get share name and server name */
++	if (node_name[1] != '\\') {
++		UNC[0] = '\\';
++		strncpy(UNC+1, node_name, len);
++		len++;
++		UNC[len] = 0;
++	} else {
++		strncpy(UNC, node_name, len);
++		UNC[len] = 0;
++	}
++
++	/* find server name end */
++	pSep = memchr(UNC+2, '\\', len-2);
++	if (!pSep) {
++		cERROR(1, ("%s: no server name end in node name: %s",
++			__FUNCTION__, node_name));
++		kfree(UNC);
++		return NULL;
++	}
++
++	/* find sharename end */
++	pSep++;
++	pSep = memchr(UNC+(pSep-UNC), '\\', len-(pSep-UNC));
++	if (!pSep) {
++		cERROR(1, ("%s:2 cant find share name in node name: %s",
++			__FUNCTION__, node_name));
++		kfree(UNC);
++		return NULL;
++	}
++	/* trim path up to sharename end;
++	 * now we have the share name in UNC */
++	*pSep = 0;
++
++	return UNC;
++}
++
++
++/**
++ * compose_mount_options	-	creates mount options for referral
++ * @sb_mountdata:	parent/root DFS mount options (template)
++ * @ref_unc:		referral server UNC
++ * @devname:		pointer for saving device name
++ *
++ * creates mount options for submount based on template options sb_mountdata
++ * and replacing unc,ip,prefixpath options with ones we've got from ref_unc.
++ *
++ * Returns: pointer to new mount options or ERR_PTR.
++ * Caller is responsible for freeing the returned value if it is not an error.
++ */
++static char *compose_mount_options(const char *sb_mountdata,
++				   const char *ref_unc,
++				   char **devname)
++{
++	int rc;
++	char *mountdata;
++	int md_len;
++	char *tkn_e;
++	char *srvIP = NULL;
++	char sep = ',';
++	int off, noff;
++
++	if (sb_mountdata == NULL)
++		return ERR_PTR(-EINVAL);
++
++	*devname = cifs_get_share_name(ref_unc);
++	rc = dns_resolve_server_name_to_ip(*devname, &srvIP);
++	if (rc != 0) {
++		cERROR(1, ("%s: Failed to resolve server part of %s to IP",
++			  __FUNCTION__, *devname));
++		mountdata = ERR_PTR(rc);
++		goto compose_mount_options_out;
++	}
++	md_len = strlen(sb_mountdata) + strlen(srvIP) + strlen(ref_unc) + 3;
++	mountdata = kzalloc(md_len+1, GFP_KERNEL);
++	if (mountdata == NULL) {
++		mountdata = ERR_PTR(-ENOMEM);
++		goto compose_mount_options_out;
++	}
++
++	/* copy all options except of unc,ip,prefixpath */
++	off = 0;
++	if (strncmp(sb_mountdata, "sep=", 4) == 0) {
++			sep = sb_mountdata[4];
++			strncpy(mountdata, sb_mountdata, 5);
++			off += 5;
++	}
++	while ((tkn_e = strchr(sb_mountdata+off, sep))) {
++		noff = (tkn_e - (sb_mountdata+off)) + 1;
++		if (strnicmp(sb_mountdata+off, "unc=", 4) == 0) {
++			off += noff;
++			continue;
++		}
++		if (strnicmp(sb_mountdata+off, "ip=", 3) == 0) {
++			off += noff;
++			continue;
++		}
++		if (strnicmp(sb_mountdata+off, "prefixpath=", 3) == 0) {
++			off += noff;
++			continue;
++		}
++		strncat(mountdata, sb_mountdata+off, noff);
++		off += noff;
++	}
++	strcat(mountdata, sb_mountdata+off);
++	mountdata[md_len] = '\0';
++
++	/* copy new IP and ref share name */
++	strcat(mountdata, ",ip=");
++	strcat(mountdata, srvIP);
++	strcat(mountdata, ",unc=");
++	strcat(mountdata, *devname);
++
++	/* find & copy prefixpath */
++	tkn_e = strchr(ref_unc+2, '\\');
++	if (tkn_e) {
++		tkn_e = strchr(tkn_e+1, '\\');
++		if (tkn_e) {
++			strcat(mountdata, ",prefixpath=");
++			strcat(mountdata, tkn_e);
++		}
++	}
++
++	/*cFYI(1,("%s: parent mountdata: %s", __FUNCTION__,sb_mountdata));*/
++	/*cFYI(1, ("%s: submount mountdata: %s", __FUNCTION__, mountdata ));*/
++
++compose_mount_options_out:
++	kfree(srvIP);
++	return mountdata;
++}
++
++
++static struct vfsmount *cifs_dfs_do_refmount(const struct vfsmount *mnt_parent,
++		struct dentry *dentry, char *ref_unc)
++{
++	struct cifs_sb_info *cifs_sb;
++	struct vfsmount *mnt;
++	char *mountdata;
++	char *devname = NULL;
++
++	cifs_sb = CIFS_SB(dentry->d_inode->i_sb);
++	mountdata = compose_mount_options(cifs_sb->mountdata,
++						ref_unc, &devname);
++
++	if (IS_ERR(mountdata))
++		return (struct vfsmount *)mountdata;
++
++	mnt = vfs_kern_mount(&cifs_fs_type, 0, devname, mountdata);
++	kfree(mountdata);
++	kfree(devname);
++	return mnt;
++
++}
++
++static char *build_full_dfs_path_from_dentry(struct dentry *dentry)
++{
++	char *full_path = NULL;
++	char *search_path;
++	char *tmp_path;
++	size_t l_max_len;
++	struct cifs_sb_info *cifs_sb;
++
++	if (dentry->d_inode == NULL)
++		return NULL;
++
++	cifs_sb = CIFS_SB(dentry->d_inode->i_sb);
++
++	if (cifs_sb->tcon == NULL)
++		return NULL;
++
++	search_path = build_path_from_dentry(dentry);
++	if (search_path == NULL)
++		return NULL;
++
++	if (cifs_sb->tcon->Flags & SMB_SHARE_IS_IN_DFS) {
++		/* we should use full path name to correct working with DFS */
++		l_max_len = strnlen(cifs_sb->tcon->treeName, MAX_TREE_SIZE+1) +
++					strnlen(search_path, MAX_PATHCONF) + 1;
++		tmp_path = kmalloc(l_max_len, GFP_KERNEL);
++		if (tmp_path == NULL) {
++			kfree(search_path);
++			return NULL;
++		}
++		strncpy(tmp_path, cifs_sb->tcon->treeName, l_max_len);
++		strcat(tmp_path, search_path);
++		tmp_path[l_max_len-1] = 0;
++		full_path = tmp_path;
++		kfree(search_path);
++	} else {
++		full_path = search_path;
++	}
++	return full_path;
++}
++
++static int add_mount_helper(struct vfsmount *newmnt, struct nameidata *nd,
++				struct list_head *mntlist)
++{
++	/* stolen from afs code */
++	int err;
++
++	mntget(newmnt);
++	err = do_add_mount(newmnt, nd, nd->mnt->mnt_flags, mntlist);
++	switch (err) {
++	case 0:
++		dput(nd->dentry);
++		mntput(nd->mnt);
++		nd->mnt = newmnt;
++		nd->dentry = dget(newmnt->mnt_root);
++		break;
++	case -EBUSY:
++		/* someone else made a mount here whilst we were busy */
++		while (d_mountpoint(nd->dentry) &&
++		       follow_down(&nd->mnt, &nd->dentry))
++			;
++		err = 0;
++	default:
++		mntput(newmnt);
++		break;
++	}
++	return err;
++}
++
++static void dump_referral(const struct dfs_info3_param *ref)
++{
++	cFYI(1, ("DFS: ref path: %s", ref->path_name));
++	cFYI(1, ("DFS: node path: %s", ref->node_name));
++	cFYI(1, ("DFS: fl: %hd, srv_type: %hd", ref->flags, ref->server_type));
++	cFYI(1, ("DFS: ref_flags: %hd, path_consumed: %hd", ref->ref_flag,
++				ref->PathConsumed));
++}
++
++
++static void*
++cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
++{
++	struct dfs_info3_param *referrals = NULL;
++	unsigned int num_referrals = 0;
++	struct cifs_sb_info *cifs_sb;
++	struct cifsSesInfo *ses;
++	char *full_path = NULL;
++	int xid, i;
++	int rc = 0;
++	struct vfsmount *mnt = ERR_PTR(-ENOENT);
++
++	cFYI(1, ("in %s", __FUNCTION__));
++	BUG_ON(IS_ROOT(dentry));
++
++	xid = GetXid();
++
++	dput(nd->dentry);
++	nd->dentry = dget(dentry);
++
++	cifs_sb = CIFS_SB(dentry->d_inode->i_sb);
++	ses = cifs_sb->tcon->ses;
++
++	if (!ses) {
++		rc = -EINVAL;
++		goto out_err;
++	}
++
++	full_path = build_full_dfs_path_from_dentry(dentry);
++	if (full_path == NULL) {
++		rc = -ENOMEM;
++		goto out_err;
++	}
++
++	rc = get_dfs_path(xid, ses , full_path, cifs_sb->local_nls,
++		&num_referrals, &referrals,
++		cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
++
++	for (i = 0; i < num_referrals; i++) {
++		dump_referral(referrals+i);
++		/* connect to a storage node */
++		if (referrals[i].flags & DFSREF_STORAGE_SERVER) {
++			int len;
++			len = strlen(referrals[i].node_name);
++			if (len < 2) {
++				cERROR(1, ("%s: Net Address path too short: %s",
++					__FUNCTION__, referrals[i].node_name));
++				rc = -EINVAL;
++				goto out_err;
++			}
++			mnt = cifs_dfs_do_refmount(nd->mnt, nd->dentry,
++						referrals[i].node_name);
++			cFYI(1, ("%s: cifs_dfs_do_refmount:%s , mnt:%p",
++					 __FUNCTION__,
++					referrals[i].node_name, mnt));
++
++			/* complete mount procedure if we acquired submount */
++			if (!IS_ERR(mnt))
++				break;
++		}
++	}
++
++	/* needed because the for() loop above can exit without a valid submount */
++	rc = PTR_ERR(mnt);
++	if (IS_ERR(mnt))
++		goto out_err;
++
++	nd->mnt->mnt_flags |= MNT_SHRINKABLE;
++	rc = add_mount_helper(mnt, nd, &cifs_dfs_automount_list);
++
++out:
++	FreeXid(xid);
++	free_dfs_info_array(referrals, num_referrals);
++	kfree(full_path);
++	cFYI(1, ("leaving %s" , __FUNCTION__));
++	return ERR_PTR(rc);
++out_err:
++	path_release(nd);
++	goto out;
++}
++
++struct inode_operations cifs_dfs_referral_inode_operations = {
++	.follow_link = cifs_dfs_follow_mountpoint,
++};
++
+diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
+index 34af556..8ad2330 100644
+--- a/fs/cifs/cifs_fs_sb.h
++++ b/fs/cifs/cifs_fs_sb.h
+@@ -43,6 +43,9 @@ struct cifs_sb_info {
+ 	mode_t	mnt_dir_mode;
+ 	int     mnt_cifs_flags;
+ 	int	prepathlen;
+-	char   *prepath;
++	char   *prepath; /* relative path under the share to mount to */
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	char   *mountdata; /* mount options received at mount time */
++#endif
+ };
+ #endif				/* _CIFS_FS_SB_H */
+diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
+index 1529d2b..d543acc 100644
+--- a/fs/cifs/cifs_spnego.c
++++ b/fs/cifs/cifs_spnego.c
+@@ -122,11 +122,13 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
+ 	cFYI(1, ("key description = %s", description));
+ 	spnego_key = request_key(&cifs_spnego_key_type, description, "");
+ 
++#ifdef CONFIG_CIFS_DEBUG2
+ 	if (cifsFYI && !IS_ERR(spnego_key)) {
+ 		struct cifs_spnego_msg *msg = spnego_key->payload.data;
+-		cifs_dump_mem("SPNEGO reply blob:", msg->data,
+-				msg->secblob_len + msg->sesskey_len);
++		cifs_dump_mem("SPNEGO reply blob:", msg->data, min(1024,
++				msg->secblob_len + msg->sesskey_len));
+ 	}
++#endif /* CONFIG_CIFS_DEBUG2 */
+ 
+ out:
+ 	kfree(description);
+diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
+index c312adc..a7035bd 100644
+--- a/fs/cifs/cifsacl.c
++++ b/fs/cifs/cifsacl.c
+@@ -129,6 +129,54 @@ int compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
+ 	return (1); /* sids compare/match */
+ }
+ 
++
++/* copy ntsd, owner sid, and group sid from a security descriptor to another */
++static void copy_sec_desc(const struct cifs_ntsd *pntsd,
++				struct cifs_ntsd *pnntsd, __u32 sidsoffset)
++{
++	int i;
++
++	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
++	struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
++
++	/* copy security descriptor control portion */
++	pnntsd->revision = pntsd->revision;
++	pnntsd->type = pntsd->type;
++	pnntsd->dacloffset = cpu_to_le32(sizeof(struct cifs_ntsd));
++	pnntsd->sacloffset = 0;
++	pnntsd->osidoffset = cpu_to_le32(sidsoffset);
++	pnntsd->gsidoffset = cpu_to_le32(sidsoffset + sizeof(struct cifs_sid));
++
++	/* copy owner sid */
++	owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
++				le32_to_cpu(pntsd->osidoffset));
++	nowner_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset);
++
++	nowner_sid_ptr->revision = owner_sid_ptr->revision;
++	nowner_sid_ptr->num_subauth = owner_sid_ptr->num_subauth;
++	for (i = 0; i < 6; i++)
++		nowner_sid_ptr->authority[i] = owner_sid_ptr->authority[i];
++	for (i = 0; i < 5; i++)
++		nowner_sid_ptr->sub_auth[i] = owner_sid_ptr->sub_auth[i];
++
++	/* copy group sid */
++	group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
++				le32_to_cpu(pntsd->gsidoffset));
++	ngroup_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset +
++					sizeof(struct cifs_sid));
++
++	ngroup_sid_ptr->revision = group_sid_ptr->revision;
++	ngroup_sid_ptr->num_subauth = group_sid_ptr->num_subauth;
++	for (i = 0; i < 6; i++)
++		ngroup_sid_ptr->authority[i] = group_sid_ptr->authority[i];
++	for (i = 0; i < 5; i++)
++		ngroup_sid_ptr->sub_auth[i] =
++				cpu_to_le32(group_sid_ptr->sub_auth[i]);
++
++	return;
++}
++
++
+ /*
+    change posix mode to reflect permissions
+    pmode is the existing mode (we only want to overwrite part of this
+@@ -220,6 +268,33 @@ static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
+ 	return;
+ }
+ 
++static __le16 fill_ace_for_sid(struct cifs_ace *pntace,
++			const struct cifs_sid *psid, __u64 nmode, umode_t bits)
++{
++	int i;
++	__u16 size = 0;
++	__u32 access_req = 0;
++
++	pntace->type = ACCESS_ALLOWED;
++	pntace->flags = 0x0;
++	mode_to_access_flags(nmode, bits, &access_req);
++	if (!access_req)
++		access_req = SET_MINIMUM_RIGHTS;
++	pntace->access_req = cpu_to_le32(access_req);
++
++	pntace->sid.revision = psid->revision;
++	pntace->sid.num_subauth = psid->num_subauth;
++	for (i = 0; i < 6; i++)
++		pntace->sid.authority[i] = psid->authority[i];
++	for (i = 0; i < psid->num_subauth; i++)
++		pntace->sid.sub_auth[i] = psid->sub_auth[i];
++
++	size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth * 4);
++	pntace->size = cpu_to_le16(size);
++
++	return (size);
++}
++
+ 
+ #ifdef CONFIG_CIFS_DEBUG2
+ static void dump_ace(struct cifs_ace *pace, char *end_of_acl)
+@@ -243,7 +318,7 @@ static void dump_ace(struct cifs_ace *pace, char *end_of_acl)
+ 		int i;
+ 		cFYI(1, ("ACE revision %d num_auth %d type %d flags %d size %d",
+ 			pace->sid.revision, pace->sid.num_subauth, pace->type,
+-			pace->flags, pace->size));
++			pace->flags, le16_to_cpu(pace->size)));
+ 		for (i = 0; i < num_subauth; ++i) {
+ 			cFYI(1, ("ACE sub_auth[%d]: 0x%x", i,
+ 				le32_to_cpu(pace->sid.sub_auth[i])));
+@@ -346,6 +421,28 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
+ }
+ 
+ 
++static int set_chmod_dacl(struct cifs_acl *pndacl, struct cifs_sid *pownersid,
++			struct cifs_sid *pgrpsid, __u64 nmode)
++{
++	__le16 size = 0;
++	struct cifs_acl *pnndacl;
++
++	pnndacl = (struct cifs_acl *)((char *)pndacl + sizeof(struct cifs_acl));
++
++	size += fill_ace_for_sid((struct cifs_ace *) ((char *)pnndacl + size),
++					pownersid, nmode, S_IRWXU);
++	size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
++					pgrpsid, nmode, S_IRWXG);
++	size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
++					 &sid_everyone, nmode, S_IRWXO);
++
++	pndacl->size = cpu_to_le16(size + sizeof(struct cifs_acl));
++	pndacl->num_aces = 3;
++
++	return (0);
++}
++
++
+ static int parse_sid(struct cifs_sid *psid, char *end_of_acl)
+ {
+ 	/* BB need to add parm so we can store the SID BB */
+@@ -432,6 +529,46 @@ static int parse_sec_desc(struct cifs_ntsd *pntsd, int acl_len,
+ }
+ 
+ 
++/* Convert permission bits from mode to equivalent CIFS ACL */
++static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
++				int acl_len, struct inode *inode, __u64 nmode)
++{
++	int rc = 0;
++	__u32 dacloffset;
++	__u32 ndacloffset;
++	__u32 sidsoffset;
++	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
++	struct cifs_acl *dacl_ptr = NULL;  /* no need for SACL ptr */
++	struct cifs_acl *ndacl_ptr = NULL; /* no need for SACL ptr */
++
++	if ((inode == NULL) || (pntsd == NULL) || (pnntsd == NULL))
++		return (-EIO);
++
++	owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
++				le32_to_cpu(pntsd->osidoffset));
++	group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
++				le32_to_cpu(pntsd->gsidoffset));
++
++	dacloffset = le32_to_cpu(pntsd->dacloffset);
++	dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
++
++	ndacloffset = sizeof(struct cifs_ntsd);
++	ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
++	ndacl_ptr->revision = dacl_ptr->revision;
++	ndacl_ptr->size = 0;
++	ndacl_ptr->num_aces = 0;
++
++	rc = set_chmod_dacl(ndacl_ptr, owner_sid_ptr, group_sid_ptr, nmode);
++
++	sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);
++
++	/* copy security descriptor control portion and owner and group sid */
++	copy_sec_desc(pntsd, pnntsd, sidsoffset);
++
++	return (rc);
++}
++
++
+ /* Retrieve an ACL from the server */
+ static struct cifs_ntsd *get_cifs_acl(u32 *pacllen, struct inode *inode,
+ 				       const char *path)
+@@ -487,6 +624,64 @@ static struct cifs_ntsd *get_cifs_acl(u32 *pacllen, struct inode *inode,
+ 	return pntsd;
+ }
+ 
++/* Set an ACL on the server */
++static int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
++				struct inode *inode, const char *path)
++{
++	struct cifsFileInfo *open_file;
++	int unlock_file = FALSE;
++	int xid;
++	int rc = -EIO;
++	__u16 fid;
++	struct super_block *sb;
++	struct cifs_sb_info *cifs_sb;
++
++#ifdef CONFIG_CIFS_DEBUG2
++	cFYI(1, ("set ACL for %s from mode 0x%x", path, inode->i_mode));
++#endif
++
++	if (!inode)
++		return (rc);
++
++	sb = inode->i_sb;
++	if (sb == NULL)
++		return (rc);
++
++	cifs_sb = CIFS_SB(sb);
++	xid = GetXid();
++
++	open_file = find_readable_file(CIFS_I(inode));
++	if (open_file) {
++		unlock_file = TRUE;
++		fid = open_file->netfid;
++	} else {
++		int oplock = FALSE;
++		/* open file */
++		rc = CIFSSMBOpen(xid, cifs_sb->tcon, path, FILE_OPEN,
++				WRITE_DAC, 0, &fid, &oplock, NULL,
++				cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
++					CIFS_MOUNT_MAP_SPECIAL_CHR);
++		if (rc != 0) {
++			cERROR(1, ("Unable to open file to set ACL"));
++			FreeXid(xid);
++			return (rc);
++		}
++	}
++
++	rc = CIFSSMBSetCIFSACL(xid, cifs_sb->tcon, fid, pnntsd, acllen);
++#ifdef CONFIG_CIFS_DEBUG2
++	cFYI(1, ("SetCIFSACL rc = %d", rc));
++#endif
++	if (unlock_file == TRUE)
++		atomic_dec(&open_file->wrtPending);
++	else
++		CIFSSMBClose(xid, cifs_sb->tcon, fid);
++
++	FreeXid(xid);
++
++	return (rc);
++}
++
+ /* Translate the CIFS ACL (simlar to NTFS ACL) for a file into mode bits */
+ void acl_to_uid_mode(struct inode *inode, const char *path)
+ {
+@@ -510,24 +705,53 @@ void acl_to_uid_mode(struct inode *inode, const char *path)
+ }
+ 
+ /* Convert mode bits to an ACL so we can update the ACL on the server */
+-int mode_to_acl(struct inode *inode, const char *path)
++int mode_to_acl(struct inode *inode, const char *path, __u64 nmode)
+ {
+ 	int rc = 0;
+ 	__u32 acllen = 0;
+-	struct cifs_ntsd *pntsd = NULL;
++	struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
++	struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
+ 
++#ifdef CONFIG_CIFS_DEBUG2
+ 	cFYI(1, ("set ACL from mode for %s", path));
++#endif
+ 
+ 	/* Get the security descriptor */
+ 	pntsd = get_cifs_acl(&acllen, inode, path);
+ 
+-	/* Add/Modify the three ACEs for owner, group, everyone
+-	   while retaining the other ACEs */
++	/* Add three ACEs for owner, group, everyone getting rid of
++	   other ACEs as chmod disables ACEs and set the security descriptor */
+ 
+-	/* Set the security descriptor */
++	if (pntsd) {
++		/* allocate memory for the smb header,
++		   set security descriptor request security descriptor
++		   parameters, and secuirty descriptor itself */
+ 
++		pnntsd = kmalloc(acllen, GFP_KERNEL);
++		if (!pnntsd) {
++			cERROR(1, ("Unable to allocate security descriptor"));
++			kfree(pntsd);
++			return (-ENOMEM);
++		}
+ 
+-	kfree(pntsd);
+-	return rc;
++		rc = build_sec_desc(pntsd, pnntsd, acllen, inode, nmode);
++
++#ifdef CONFIG_CIFS_DEBUG2
++		cFYI(1, ("build_sec_desc rc: %d", rc));
++#endif
++
++		if (!rc) {
++			/* Set the security descriptor */
++			rc = set_cifs_acl(pnntsd, acllen, inode, path);
++#ifdef CONFIG_CIFS_DEBUG2
++			cFYI(1, ("set_cifs_acl rc: %d", rc));
++#endif
++		}
++
++		kfree(pnntsd);
++		kfree(pntsd);
++	}
++
++	return (rc);
+ }
+ #endif /* CONFIG_CIFS_EXPERIMENTAL */
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 093beaa..e9f4ec7 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -44,6 +44,7 @@
+ #include "cifs_fs_sb.h"
+ #include <linux/mm.h>
+ #include <linux/key-type.h>
++#include "dns_resolve.h"
+ #include "cifs_spnego.h"
+ #define CIFS_MAGIC_NUMBER 0xFF534D42	/* the first four bytes of SMB PDUs */
+ 
+@@ -96,6 +97,9 @@ cifs_read_super(struct super_block *sb, void *data,
+ {
+ 	struct inode *inode;
+ 	struct cifs_sb_info *cifs_sb;
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	int len;
++#endif
+ 	int rc = 0;
+ 
+ 	/* BB should we make this contingent on mount parm? */
+@@ -105,6 +109,25 @@ cifs_read_super(struct super_block *sb, void *data,
+ 	if (cifs_sb == NULL)
+ 		return -ENOMEM;
+ 
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	/* copy mount params to sb for use in submounts */
++	/* BB: should we move this after the mount so we
++	 * do not have to do the copy on failed mounts?
++	 * BB: May be it is better to do simple copy before
++	 * complex operation (mount), and in case of fail
++	 * just exit instead of doing mount and attempting
++	 * undo it if this copy fails?*/
++	len = strlen(data);
++	cifs_sb->mountdata = kzalloc(len + 1, GFP_KERNEL);
++	if (cifs_sb->mountdata == NULL) {
++		kfree(sb->s_fs_info);
++		sb->s_fs_info = NULL;
++		return -ENOMEM;
++	}
++	strncpy(cifs_sb->mountdata, data, len + 1);
++	cifs_sb->mountdata[len] = '\0';
++#endif
++
+ 	rc = cifs_mount(sb, cifs_sb, data, devname);
+ 
+ 	if (rc) {
+@@ -154,6 +177,12 @@ out_no_root:
+ 
+ out_mount_failed:
+ 	if (cifs_sb) {
++#ifdef CONFIG_CIFS_DFS_UPCALL
++		if (cifs_sb->mountdata) {
++			kfree(cifs_sb->mountdata);
++			cifs_sb->mountdata = NULL;
++		}
++#endif
+ 		if (cifs_sb->local_nls)
+ 			unload_nls(cifs_sb->local_nls);
+ 		kfree(cifs_sb);
+@@ -177,6 +206,13 @@ cifs_put_super(struct super_block *sb)
+ 	if (rc) {
+ 		cERROR(1, ("cifs_umount failed with return code %d", rc));
+ 	}
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	if (cifs_sb->mountdata) {
++		kfree(cifs_sb->mountdata);
++		cifs_sb->mountdata = NULL;
++	}
++#endif
++
+ 	unload_nls(cifs_sb->local_nls);
+ 	kfree(cifs_sb);
+ 	return;
+@@ -435,6 +471,10 @@ static void cifs_umount_begin(struct vfsmount *vfsmnt, int flags)
+ 	struct cifs_sb_info *cifs_sb;
+ 	struct cifsTconInfo *tcon;
+ 
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	dfs_shrink_umount_helper(vfsmnt);
++#endif /* CONFIG CIFS_DFS_UPCALL */
++
+ 	if (!(flags & MNT_FORCE))
+ 		return;
+ 	cifs_sb = CIFS_SB(vfsmnt->mnt_sb);
+@@ -552,7 +592,7 @@ static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
+ 	return remote_llseek(file, offset, origin);
+ }
+ 
+-static struct file_system_type cifs_fs_type = {
++struct file_system_type cifs_fs_type = {
+ 	.owner = THIS_MODULE,
+ 	.name = "cifs",
+ 	.get_sb = cifs_get_sb,
+@@ -1015,11 +1055,16 @@ init_cifs(void)
+ 	if (rc)
+ 		goto out_unregister_filesystem;
+ #endif
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	rc = register_key_type(&key_type_dns_resolver);
++	if (rc)
++		goto out_unregister_key_type;
++#endif
+ 	oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
+ 	if (IS_ERR(oplockThread)) {
+ 		rc = PTR_ERR(oplockThread);
+ 		cERROR(1, ("error %d create oplock thread", rc));
+-		goto out_unregister_key_type;
++		goto out_unregister_dfs_key_type;
+ 	}
+ 
+ 	dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
+@@ -1033,7 +1078,11 @@ init_cifs(void)
+ 
+  out_stop_oplock_thread:
+ 	kthread_stop(oplockThread);
++ out_unregister_dfs_key_type:
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	unregister_key_type(&key_type_dns_resolver);
+  out_unregister_key_type:
++#endif
+ #ifdef CONFIG_CIFS_UPCALL
+ 	unregister_key_type(&cifs_spnego_key_type);
+  out_unregister_filesystem:
+@@ -1059,6 +1108,9 @@ exit_cifs(void)
+ #ifdef CONFIG_PROC_FS
+ 	cifs_proc_clean();
+ #endif
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	unregister_key_type(&key_type_dns_resolver);
++#endif
+ #ifdef CONFIG_CIFS_UPCALL
+ 	unregister_key_type(&cifs_spnego_key_type);
+ #endif
+diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
+index 2a21dc6..195b14d 100644
+--- a/fs/cifs/cifsfs.h
++++ b/fs/cifs/cifsfs.h
+@@ -32,6 +32,7 @@
+ #define TRUE 1
+ #endif
+ 
++extern struct file_system_type cifs_fs_type;
+ extern const struct address_space_operations cifs_addr_ops;
+ extern const struct address_space_operations cifs_addr_ops_smallbuf;
+ 
+@@ -60,6 +61,10 @@ extern int cifs_setattr(struct dentry *, struct iattr *);
+ 
+ extern const struct inode_operations cifs_file_inode_ops;
+ extern const struct inode_operations cifs_symlink_inode_ops;
++extern struct list_head cifs_dfs_automount_list;
++extern struct inode_operations cifs_dfs_referral_inode_operations;
++
++
+ 
+ /* Functions related to files and directories */
+ extern const struct file_operations cifs_file_ops;
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 1fde219..5d32d8d 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -1,7 +1,7 @@
+ /*
+  *   fs/cifs/cifsglob.h
+  *
+- *   Copyright (C) International Business Machines  Corp., 2002,2007
++ *   Copyright (C) International Business Machines  Corp., 2002,2008
+  *   Author(s): Steve French (sfrench at us.ibm.com)
+  *              Jeremy Allison (jra at samba.org)
+  *
+@@ -70,14 +70,6 @@
+ #endif
+ 
+ /*
+- * This information is kept on every Server we know about.
+- *
+- * Some things to note:
+- *
+- */
+-#define SERVER_NAME_LEN_WITH_NULL	(SERVER_NAME_LENGTH + 1)
+-
+-/*
+  * CIFS vfs client Status information (based on what we know.)
+  */
+ 
+@@ -460,6 +452,37 @@ struct dir_notify_req {
+        struct file *pfile;
+ };
+ 
++struct dfs_info3_param {
++	int flags; /* DFSREF_REFERRAL_SERVER, DFSREF_STORAGE_SERVER*/
++	int PathConsumed;
++	int server_type;
++	int ref_flag;
++	char *path_name;
++	char *node_name;
++};
++
++static inline void free_dfs_info_param(struct dfs_info3_param *param)
++{
++	if (param) {
++		kfree(param->path_name);
++		kfree(param->node_name);
++		kfree(param);
++	}
++}
++
++static inline void free_dfs_info_array(struct dfs_info3_param *param,
++				       int number_of_items)
++{
++	int i;
++	if ((number_of_items == 0) || (param == NULL))
++		return;
++	for (i = 0; i < number_of_items; i++) {
++		kfree(param[i].path_name);
++		kfree(param[i].node_name);
++	}
++	kfree(param);
++}
++
+ #define   MID_FREE 0
+ #define   MID_REQUEST_ALLOCATED 1
+ #define   MID_REQUEST_SUBMITTED 2
+diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
+index dbe6b84..47f7950 100644
+--- a/fs/cifs/cifspdu.h
++++ b/fs/cifs/cifspdu.h
+@@ -237,6 +237,9 @@
+ 				| DELETE | READ_CONTROL | WRITE_DAC \
+ 				| WRITE_OWNER | SYNCHRONIZE)
+ 
++#define SET_MINIMUM_RIGHTS (FILE_READ_EA | FILE_READ_ATTRIBUTES \
++				| READ_CONTROL | SYNCHRONIZE)
++
+ 
+ /*
+  * Invalid readdir handle
+diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
+index 8350eec..2f09f56 100644
+--- a/fs/cifs/cifsproto.h
++++ b/fs/cifs/cifsproto.h
+@@ -1,7 +1,7 @@
+ /*
+  *   fs/cifs/cifsproto.h
+  *
+- *   Copyright (c) International Business Machines  Corp., 2002,2007
++ *   Copyright (c) International Business Machines  Corp., 2002,2008
+  *   Author(s): Steve French (sfrench at us.ibm.com)
+  *
+  *   This library is free software; you can redistribute it and/or modify
+@@ -97,11 +97,14 @@ extern int cifs_get_inode_info_unix(struct inode **pinode,
+ 			const unsigned char *search_path,
+ 			struct super_block *sb, int xid);
+ extern void acl_to_uid_mode(struct inode *inode, const char *search_path);
+-extern int mode_to_acl(struct inode *inode, const char *path);
++extern int mode_to_acl(struct inode *inode, const char *path, __u64);
+ 
+ extern int cifs_mount(struct super_block *, struct cifs_sb_info *, char *,
+ 			const char *);
+ extern int cifs_umount(struct super_block *, struct cifs_sb_info *);
++#ifdef CONFIG_CIFS_DFS_UPCALL
++extern void dfs_shrink_umount_helper(struct vfsmount *vfsmnt);
++#endif
+ void cifs_proc_init(void);
+ void cifs_proc_clean(void);
+ 
+@@ -153,7 +156,7 @@ extern int get_dfs_path(int xid, struct cifsSesInfo *pSesInfo,
+ 			const char *old_path,
+ 			const struct nls_table *nls_codepage,
+ 			unsigned int *pnum_referrals,
+-			unsigned char **preferrals,
++			struct dfs_info3_param **preferrals,
+ 			int remap);
+ extern void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
+ 				 struct super_block *sb, struct smb_vol *vol);
+@@ -342,6 +345,8 @@ extern int CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon,
+ 		const struct nls_table *nls_codepage, int remap_special_chars);
+ extern int CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon,
+ 			__u16 fid, struct cifs_ntsd **acl_inf, __u32 *buflen);
++extern int CIFSSMBSetCIFSACL(const int, struct cifsTconInfo *, __u16,
++			struct cifs_ntsd *, __u32);
+ extern int CIFSSMBGetPosixACL(const int xid, struct cifsTconInfo *tcon,
+ 		const unsigned char *searchName,
+ 		char *acl_inf, const int buflen, const int acl_type,
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index 9e8a6be..9409524 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -3156,6 +3156,71 @@ qsec_out:
+ /*	cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
+ 	return rc;
+ }
++
++int
++CIFSSMBSetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
++			struct cifs_ntsd *pntsd, __u32 acllen)
++{
++	__u16 byte_count, param_count, data_count, param_offset, data_offset;
++	int rc = 0;
++	int bytes_returned = 0;
++	SET_SEC_DESC_REQ *pSMB = NULL;
++	NTRANSACT_RSP *pSMBr = NULL;
++
++setCifsAclRetry:
++	rc = smb_init(SMB_COM_NT_TRANSACT, 19, tcon, (void **) &pSMB,
++			(void **) &pSMBr);
++	if (rc)
++			return (rc);
++
++	pSMB->MaxSetupCount = 0;
++	pSMB->Reserved = 0;
++
++	param_count = 8;
++	param_offset = offsetof(struct smb_com_transaction_ssec_req, Fid) - 4;
++	data_count = acllen;
++	data_offset = param_offset + param_count;
++	byte_count = 3 /* pad */  + param_count;
++
++	pSMB->DataCount = cpu_to_le32(data_count);
++	pSMB->TotalDataCount = pSMB->DataCount;
++	pSMB->MaxParameterCount = cpu_to_le32(4);
++	pSMB->MaxDataCount = cpu_to_le32(16384);
++	pSMB->ParameterCount = cpu_to_le32(param_count);
++	pSMB->ParameterOffset = cpu_to_le32(param_offset);
++	pSMB->TotalParameterCount = pSMB->ParameterCount;
++	pSMB->DataOffset = cpu_to_le32(data_offset);
++	pSMB->SetupCount = 0;
++	pSMB->SubCommand = cpu_to_le16(NT_TRANSACT_SET_SECURITY_DESC);
++	pSMB->ByteCount = cpu_to_le16(byte_count+data_count);
++
++	pSMB->Fid = fid; /* file handle always le */
++	pSMB->Reserved2 = 0;
++	pSMB->AclFlags = cpu_to_le32(CIFS_ACL_DACL);
++
++	if (pntsd && acllen) {
++		memcpy((char *) &pSMBr->hdr.Protocol + data_offset,
++			(char *) pntsd,
++			acllen);
++		pSMB->hdr.smb_buf_length += (byte_count + data_count);
++
++	} else
++		pSMB->hdr.smb_buf_length += byte_count;
++
++	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
++		(struct smb_hdr *) pSMBr, &bytes_returned, 0);
++
++	cFYI(1, ("SetCIFSACL bytes_returned: %d, rc: %d", bytes_returned, rc));
++	if (rc)
++		cFYI(1, ("Set CIFS ACL returned %d", rc));
++	cifs_buf_release(pSMB);
++
++	if (rc == -EAGAIN)
++		goto setCifsAclRetry;
++
++	return (rc);
++}
++
+ #endif /* CONFIG_CIFS_EXPERIMENTAL */
+ 
+ /* Legacy Query Path Information call for lookup to old servers such
+@@ -5499,7 +5564,7 @@ SetEARetry:
+ 	else
+ 		name_len = strnlen(ea_name, 255);
+ 
+-	count = sizeof(*parm_data) + ea_value_len + name_len + 1;
++	count = sizeof(*parm_data) + ea_value_len + name_len;
+ 	pSMB->MaxParameterCount = cpu_to_le16(2);
+ 	pSMB->MaxDataCount = cpu_to_le16(1000);	/* BB find max SMB size from sess */
+ 	pSMB->MaxSetupCount = 0;
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index fd9147c..65d0ba7 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -1,7 +1,7 @@
+ /*
+  *   fs/cifs/connect.c
+  *
+- *   Copyright (C) International Business Machines  Corp., 2002,2007
++ *   Copyright (C) International Business Machines  Corp., 2002,2008
+  *   Author(s): Steve French (sfrench at us.ibm.com)
+  *
+  *   This library is free software; you can redistribute it and/or modify
+@@ -1410,7 +1410,7 @@ connect_to_dfs_path(int xid, struct cifsSesInfo *pSesInfo,
+ 		    const char *old_path, const struct nls_table *nls_codepage,
+ 		    int remap)
+ {
+-	unsigned char *referrals = NULL;
++	struct dfs_info3_param *referrals = NULL;
+ 	unsigned int num_referrals;
+ 	int rc = 0;
+ 
+@@ -1429,12 +1429,14 @@ connect_to_dfs_path(int xid, struct cifsSesInfo *pSesInfo,
+ int
+ get_dfs_path(int xid, struct cifsSesInfo *pSesInfo, const char *old_path,
+ 	     const struct nls_table *nls_codepage, unsigned int *pnum_referrals,
+-	     unsigned char **preferrals, int remap)
++	     struct dfs_info3_param **preferrals, int remap)
+ {
+ 	char *temp_unc;
+ 	int rc = 0;
++	unsigned char *targetUNCs;
+ 
+ 	*pnum_referrals = 0;
++	*preferrals = NULL;
+ 
+ 	if (pSesInfo->ipc_tid == 0) {
+ 		temp_unc = kmalloc(2 /* for slashes */ +
+@@ -1454,8 +1456,10 @@ get_dfs_path(int xid, struct cifsSesInfo *pSesInfo, const char *old_path,
+ 		kfree(temp_unc);
+ 	}
+ 	if (rc == 0)
+-		rc = CIFSGetDFSRefer(xid, pSesInfo, old_path, preferrals,
++		rc = CIFSGetDFSRefer(xid, pSesInfo, old_path, &targetUNCs,
+ 				     pnum_referrals, nls_codepage, remap);
++	/* BB map targetUNCs to dfs_info3 structures, here or
++		in CIFSGetDFSRefer BB */
+ 
+ 	return rc;
+ }
+@@ -1964,7 +1968,15 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
+ 
+ 	if (existingCifsSes) {
+ 		pSesInfo = existingCifsSes;
+-		cFYI(1, ("Existing smb sess found"));
++		cFYI(1, ("Existing smb sess found (status=%d)",
++			pSesInfo->status));
++		down(&pSesInfo->sesSem);
++		if (pSesInfo->status == CifsNeedReconnect) {
++			cFYI(1, ("Session needs reconnect"));
++			rc = cifs_setup_session(xid, pSesInfo,
++						cifs_sb->local_nls);
++		}
++		up(&pSesInfo->sesSem);
+ 	} else if (!rc) {
+ 		cFYI(1, ("Existing smb sess not found"));
+ 		pSesInfo = sesInfoAlloc();
+@@ -3514,7 +3526,7 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
+ 		sesInfoFree(ses);
+ 
+ 	FreeXid(xid);
+-	return rc;	/* BB check if we should always return zero here */
++	return rc;
+ }
+ 
+ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index 37dc97a..699ec11 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -517,12 +517,10 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
+ 		d_add(direntry, NULL);
+ 	/*	if it was once a directory (but how can we tell?) we could do
+ 		shrink_dcache_parent(direntry); */
+-	} else {
+-		cERROR(1, ("Error 0x%x on cifs_get_inode_info in lookup of %s",
+-			   rc, full_path));
+-		/* BB special case check for Access Denied - watch security
+-		exposure of returning dir info implicitly via different rc
+-		if file exists or not but no access BB */
++	} else if (rc != -EACCES) {
++		cERROR(1, ("Unexpected lookup error %d", rc));
++		/* We special case check for Access Denied - since that
++		is a common return code */
+ 	}
+ 
+ 	kfree(full_path);
+diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
+new file mode 100644
+index 0000000..ef7f438
+--- /dev/null
++++ b/fs/cifs/dns_resolve.c
+@@ -0,0 +1,124 @@
++/*
++ *  fs/cifs/dns_resolve.c
++ *
++ *   Copyright (c) 2007 Igor Mammedov
++ *   Author(s): Igor Mammedov (niallain at gmail.com)
++ *              Steve French (sfrench at us.ibm.com)
++ *
++ *   Contains the CIFS DFS upcall routines used for hostname to
++ *   IP address translation.
++ *
++ *   This library is free software; you can redistribute it and/or modify
++ *   it under the terms of the GNU Lesser General Public License as published
++ *   by the Free Software Foundation; either version 2.1 of the License, or
++ *   (at your option) any later version.
++ *
++ *   This library is distributed in the hope that it will be useful,
++ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
++ *   the GNU Lesser General Public License for more details.
++ *
++ *   You should have received a copy of the GNU Lesser General Public License
++ *   along with this library; if not, write to the Free Software
++ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <keys/user-type.h>
++#include "dns_resolve.h"
++#include "cifsglob.h"
++#include "cifsproto.h"
++#include "cifs_debug.h"
++
++static int dns_resolver_instantiate(struct key *key, const void *data,
++		size_t datalen)
++{
++	int rc = 0;
++	char *ip;
++
++	ip = kmalloc(datalen+1, GFP_KERNEL);
++	if (!ip)
++		return -ENOMEM;
++
++	memcpy(ip, data, datalen);
++	ip[datalen] = '\0';
++
++	rcu_assign_pointer(key->payload.data, ip);
++
++	return rc;
++}
++
++struct key_type key_type_dns_resolver = {
++	.name        = "dns_resolver",
++	.def_datalen = sizeof(struct in_addr),
++	.describe    = user_describe,
++	.instantiate = dns_resolver_instantiate,
++	.match       = user_match,
++};
++
++
++/* Resolves server name to ip address.
++ * input:
++ * 	unc - server UNC
++ * output:
++ * 	*ip_addr - pointer to server ip, caller responcible for freeing it.
++ * return 0 on success
++ */
++int
++dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
++{
++	int rc = -EAGAIN;
++	struct key *rkey;
++	char *name;
++	int len;
++
++	if (!ip_addr || !unc)
++		return -EINVAL;
++
++	/* search for server name delimiter */
++	len = strlen(unc);
++	if (len < 3) {
++		cFYI(1, ("%s: unc is too short: %s", __FUNCTION__, unc));
++		return -EINVAL;
++	}
++	len -= 2;
++	name = memchr(unc+2, '\\', len);
++	if (!name) {
++		cFYI(1, ("%s: probably server name is whole unc: %s",
++					__FUNCTION__, unc));
++	} else {
++		len = (name - unc) - 2/* leading // */;
++	}
++
++	name = kmalloc(len+1, GFP_KERNEL);
++	if (!name) {
++		rc = -ENOMEM;
++		return rc;
++	}
++	memcpy(name, unc+2, len);
++	name[len] = 0;
++
++	rkey = request_key(&key_type_dns_resolver, name, "");
++	if (!IS_ERR(rkey)) {
++		len = strlen(rkey->payload.data);
++		*ip_addr = kmalloc(len+1, GFP_KERNEL);
++		if (*ip_addr) {
++			memcpy(*ip_addr, rkey->payload.data, len);
++			(*ip_addr)[len] = '\0';
++			cFYI(1, ("%s: resolved: %s to %s", __FUNCTION__,
++					rkey->description,
++					*ip_addr
++				));
++			rc = 0;
++		} else {
++			rc = -ENOMEM;
++		}
++		key_put(rkey);
++	} else {
++		cERROR(1, ("%s: unable to resolve: %s", __FUNCTION__, name));
++	}
++
++	kfree(name);
++	return rc;
++}
++
++
+diff --git a/fs/cifs/dns_resolve.h b/fs/cifs/dns_resolve.h
+new file mode 100644
+index 0000000..073fdc3
+--- /dev/null
++++ b/fs/cifs/dns_resolve.h
+@@ -0,0 +1,32 @@
++/*
++ *   fs/cifs/dns_resolve.h -- DNS Resolver upcall management for CIFS DFS
++ *                            Handles host name to IP address resolution
++ * 
++ *   Copyright (c) International Business Machines  Corp., 2008
++ *   Author(s): Steve French (sfrench at us.ibm.com)
++ *
++ *   This library is free software; you can redistribute it and/or modify
++ *   it under the terms of the GNU Lesser General Public License as published
++ *   by the Free Software Foundation; either version 2.1 of the License, or
++ *   (at your option) any later version.
++ *
++ *   This library is distributed in the hope that it will be useful,
++ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
++ *   the GNU Lesser General Public License for more details.
++ *
++ *   You should have received a copy of the GNU Lesser General Public License
++ *   along with this library; if not, write to the Free Software
++ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#ifndef _DNS_RESOLVE_H
++#define _DNS_RESOLVE_H
++
++#ifdef __KERNEL__
++#include <linux/key-type.h>
++extern struct key_type key_type_dns_resolver;
++extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr);
++#endif /* KERNEL */
++
++#endif /* _DNS_RESOLVE_H */
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index dd26e27..5f7c374 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -1179,12 +1179,10 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
+ 		atomic_dec(&open_file->wrtPending);
+ 		/* Does mm or vfs already set times? */
+ 		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
+-		if ((bytes_written > 0) && (offset)) {
++		if ((bytes_written > 0) && (offset))
+ 			rc = 0;
+-		} else if (bytes_written < 0) {
+-			if (rc != -EBADF)
+-				rc = bytes_written;
+-		}
++		else if (bytes_written < 0)
++			rc = bytes_written;
+ 	} else {
+ 		cFYI(1, ("No writeable filehandles for inode"));
+ 		rc = -EIO;
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index e915eb1..d9567ba 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -54,9 +54,9 @@ int cifs_get_inode_info_unix(struct inode **pinode,
+ 					    MAX_TREE_SIZE + 1) +
+ 				    strnlen(search_path, MAX_PATHCONF) + 1,
+ 				    GFP_KERNEL);
+-			if (tmp_path == NULL) {
++			if (tmp_path == NULL)
+ 				return -ENOMEM;
+-			}
++
+ 			/* have to skip first of the double backslash of
+ 			   UNC name */
+ 			strncpy(tmp_path, pTcon->treeName, MAX_TREE_SIZE);
+@@ -511,7 +511,8 @@ int cifs_get_inode_info(struct inode **pinode,
+ 		}
+ 
+ 		spin_lock(&inode->i_lock);
+-		if (is_size_safe_to_change(cifsInfo, le64_to_cpu(pfindData->EndOfFile))) {
++		if (is_size_safe_to_change(cifsInfo,
++					   le64_to_cpu(pfindData->EndOfFile))) {
+ 			/* can not safely shrink the file size here if the
+ 			   client is writing to it due to potential races */
+ 			i_size_write(inode, le64_to_cpu(pfindData->EndOfFile));
+@@ -931,7 +932,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
+ 		(CIFS_UNIX_POSIX_PATH_OPS_CAP &
+ 			le64_to_cpu(pTcon->fsUnixInfo.Capability))) {
+ 		u32 oplock = 0;
+-		FILE_UNIX_BASIC_INFO * pInfo =
++		FILE_UNIX_BASIC_INFO *pInfo =
+ 			kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
+ 		if (pInfo == NULL) {
+ 			rc = -ENOMEM;
+@@ -1607,7 +1608,14 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
+ 						CIFS_MOUNT_MAP_SPECIAL_CHR);
+ 	else if (attrs->ia_valid & ATTR_MODE) {
+ 		rc = 0;
+-		if ((mode & S_IWUGO) == 0) /* not writeable */ {
++#ifdef CONFIG_CIFS_EXPERIMENTAL
++		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
++			rc = mode_to_acl(direntry->d_inode, full_path, mode);
++		else if ((mode & S_IWUGO) == 0) {
++#else
++		if ((mode & S_IWUGO) == 0) {
++#endif
++			/* not writeable */
+ 			if ((cifsInode->cifsAttrs & ATTR_READONLY) == 0) {
+ 				set_dosattr = TRUE;
+ 				time_buf.Attributes =
+@@ -1626,10 +1634,10 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
+ 			if (time_buf.Attributes == 0)
+ 				time_buf.Attributes |= cpu_to_le32(ATTR_NORMAL);
+ 		}
+-		/* BB to be implemented -
+-		   via Windows security descriptors or streams */
+-		/* CIFSSMBWinSetPerms(xid, pTcon, full_path, mode, uid, gid,
+-				      cifs_sb->local_nls); */
++#ifdef CONFIG_CIFS_EXPERIMENTAL
++		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
++			mode_to_acl(direntry->d_inode, full_path, mode);
++#endif
+ 	}
+ 
+ 	if (attrs->ia_valid & ATTR_ATIME) {
+diff --git a/fs/cifs/link.c b/fs/cifs/link.c
+index 11f2657..1d6fb01 100644
+--- a/fs/cifs/link.c
++++ b/fs/cifs/link.c
+@@ -1,7 +1,7 @@
+ /*
+  *   fs/cifs/link.c
+  *
+- *   Copyright (C) International Business Machines  Corp., 2002,2003
++ *   Copyright (C) International Business Machines  Corp., 2002,2008
+  *   Author(s): Steve French (sfrench at us.ibm.com)
+  *
+  *   This library is free software; you can redistribute it and/or modify
+@@ -236,8 +236,6 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen)
+ 	char *full_path = NULL;
+ 	char *tmp_path = NULL;
+ 	char *tmpbuffer;
+-	unsigned char *referrals = NULL;
+-	unsigned int num_referrals = 0;
+ 	int len;
+ 	__u16 fid;
+ 
+@@ -297,8 +295,11 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen)
+ 				cFYI(1, ("Error closing junction point "
+ 					 "(open for ioctl)"));
+ 			}
++			/* BB unwind this long, nested function, or remove BB */
+ 			if (rc == -EIO) {
+ 				/* Query if DFS Junction */
++				unsigned int num_referrals = 0;
++				struct dfs_info3_param *refs = NULL;
+ 				tmp_path =
+ 					kmalloc(MAX_TREE_SIZE + MAX_PATHCONF + 1,
+ 						GFP_KERNEL);
+@@ -310,7 +311,7 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen)
+ 					rc = get_dfs_path(xid, pTcon->ses,
+ 						tmp_path,
+ 						cifs_sb->local_nls,
+-						&num_referrals, &referrals,
++						&num_referrals, &refs,
+ 						cifs_sb->mnt_cifs_flags &
+ 						    CIFS_MOUNT_MAP_SPECIAL_CHR);
+ 					cFYI(1, ("Get DFS for %s rc = %d ",
+@@ -320,14 +321,13 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen)
+ 					else {
+ 						cFYI(1, ("num referral: %d",
+ 							num_referrals));
+-						if (referrals) {
+-							cFYI(1,("referral string: %s", referrals));
++						if (refs && refs->path_name) {
+ 							strncpy(tmpbuffer,
+-								referrals,
++								refs->path_name,
+ 								len-1);
+ 						}
+ 					}
+-					kfree(referrals);
++					kfree(refs);
+ 					kfree(tmp_path);
+ }
+ 				/* BB add code like else decode referrals
+diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
+index d0cb469..d2153ab 100644
+--- a/fs/cifs/sess.c
++++ b/fs/cifs/sess.c
+@@ -528,9 +528,11 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
+ 			rc = -EOVERFLOW;
+ 			goto ssetup_exit;
+ 		}
+-		ses->server->mac_signing_key.len = msg->sesskey_len;
+-		memcpy(ses->server->mac_signing_key.data.krb5, msg->data,
+-			msg->sesskey_len);
++		if (first_time) {
++			ses->server->mac_signing_key.len = msg->sesskey_len;
++			memcpy(ses->server->mac_signing_key.data.krb5,
++				msg->data, msg->sesskey_len);
++		}
+ 		pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
+ 		capabilities |= CAP_EXTENDED_SECURITY;
+ 		pSMB->req.Capabilities = cpu_to_le32(capabilities);
+@@ -540,7 +542,7 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
+ 
+ 		if (ses->capabilities & CAP_UNICODE) {
+ 			/* unicode strings must be word aligned */
+-			if (iov[0].iov_len % 2) {
++			if ((iov[0].iov_len + iov[1].iov_len) % 2) {
+ 				*bcc_ptr = 0;
+ 				bcc_ptr++;
+ 			}
 diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
 index dcc6aea..e3eb355 100644
 --- a/fs/coda/psdev.c
@@ -170245,6 +190926,484 @@
  #define IDE_ARCH_OBSOLETE_DEFAULTS
  
  static __inline__ int ide_default_irq(unsigned long base)
+diff --git a/include/asm-s390/airq.h b/include/asm-s390/airq.h
+new file mode 100644
+index 0000000..41d028c
+--- /dev/null
++++ b/include/asm-s390/airq.h
+@@ -0,0 +1,19 @@
++/*
++ *  include/asm-s390/airq.h
++ *
++ *    Copyright IBM Corp. 2002,2007
++ *    Author(s): Ingo Adlung <adlung at de.ibm.com>
++ *		 Cornelia Huck <cornelia.huck at de.ibm.com>
++ *		 Arnd Bergmann <arndb at de.ibm.com>
++ *		 Peter Oberparleiter <peter.oberparleiter at de.ibm.com>
++ */
++
++#ifndef _ASM_S390_AIRQ_H
++#define _ASM_S390_AIRQ_H
++
++typedef void (*adapter_int_handler_t)(void *, void *);
++
++void *s390_register_adapter_interrupt(adapter_int_handler_t, void *);
++void s390_unregister_adapter_interrupt(void *);
++
++#endif /* _ASM_S390_AIRQ_H */
+diff --git a/include/asm-s390/cio.h b/include/asm-s390/cio.h
+index 2f08c16..123b557 100644
+--- a/include/asm-s390/cio.h
++++ b/include/asm-s390/cio.h
+@@ -24,8 +24,8 @@
+  * @fmt: format
+  * @pfch: prefetch
+  * @isic: initial-status interruption control
+- * @alcc: adress-limit checking control
+- * @ssi: supress-suspended interruption
++ * @alcc: address-limit checking control
++ * @ssi: suppress-suspended interruption
+  * @zcc: zero condition code
+  * @ectl: extended control
+  * @pno: path not operational
+diff --git a/include/asm-s390/dasd.h b/include/asm-s390/dasd.h
+index 604f68f..3f002e1 100644
+--- a/include/asm-s390/dasd.h
++++ b/include/asm-s390/dasd.h
+@@ -105,7 +105,7 @@ typedef struct dasd_information_t {
+ } dasd_information_t;
+ 
+ /*
+- * Read Subsystem Data - Perfomance Statistics
++ * Read Subsystem Data - Performance Statistics
+  */ 
+ typedef struct dasd_rssd_perf_stats_t {
+ 	unsigned char  invalid:1;
+diff --git a/include/asm-s390/ipl.h b/include/asm-s390/ipl.h
+index 2c40fd3..c1b2e50 100644
+--- a/include/asm-s390/ipl.h
++++ b/include/asm-s390/ipl.h
+@@ -83,6 +83,8 @@ extern u32 dump_prefix_page;
+ extern unsigned int zfcpdump_prefix_array[];
+ 
+ extern void do_reipl(void);
++extern void do_halt(void);
++extern void do_poff(void);
+ extern void ipl_save_parameters(void);
+ 
+ enum {
+@@ -118,7 +120,7 @@ struct ipl_info
+ };
+ 
+ extern struct ipl_info ipl_info;
+-extern void setup_ipl_info(void);
++extern void setup_ipl(void);
+ 
+ /*
+  * DIAG 308 support
+@@ -141,6 +143,10 @@ enum diag308_opt {
+ 	DIAG308_IPL_OPT_DUMP	= 0x20,
+ };
+ 
++enum diag308_flags {
++	DIAG308_FLAGS_LP_VALID	= 0x80,
++};
++
+ enum diag308_rc {
+ 	DIAG308_RC_OK	= 1,
+ };
+diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h
+index 05b8421..a77d4ba 100644
+--- a/include/asm-s390/mmu_context.h
++++ b/include/asm-s390/mmu_context.h
+@@ -12,10 +12,15 @@
+ #include <asm/pgalloc.h>
+ #include <asm-generic/mm_hooks.h>
+ 
+-/*
+- * get a new mmu context.. S390 don't know about contexts.
+- */
+-#define init_new_context(tsk,mm)        0
++static inline int init_new_context(struct task_struct *tsk,
++				   struct mm_struct *mm)
++{
++	mm->context = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
++#ifdef CONFIG_64BIT
++	mm->context |= _ASCE_TYPE_REGION3;
++#endif
++	return 0;
++}
+ 
+ #define destroy_context(mm)             do { } while (0)
+ 
+@@ -27,19 +32,11 @@
+ 
+ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
+ {
+-	pgd_t *pgd = mm->pgd;
+-	unsigned long asce_bits;
+-
+-	/* Calculate asce bits from the first pgd table entry. */
+-	asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
+-#ifdef CONFIG_64BIT
+-	asce_bits |= _ASCE_TYPE_REGION3;
+-#endif
+-	S390_lowcore.user_asce = asce_bits | __pa(pgd);
++	S390_lowcore.user_asce = mm->context | __pa(mm->pgd);
+ 	if (switch_amode) {
+ 		/* Load primary space page table origin. */
+-		pgd_t *shadow_pgd = get_shadow_table(pgd) ? : pgd;
+-		S390_lowcore.user_exec_asce = asce_bits | __pa(shadow_pgd);
++		pgd_t *shadow_pgd = get_shadow_table(mm->pgd) ? : mm->pgd;
++		S390_lowcore.user_exec_asce = mm->context | __pa(shadow_pgd);
+ 		asm volatile(LCTL_OPCODE" 1,1,%0\n"
+ 			     : : "m" (S390_lowcore.user_exec_asce) );
+ 	} else
+diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
+index 1f530f8..79b9eab 100644
+--- a/include/asm-s390/pgtable.h
++++ b/include/asm-s390/pgtable.h
+@@ -104,41 +104,27 @@ extern char empty_zero_page[PAGE_SIZE];
+ 
+ #ifndef __ASSEMBLY__
+ /*
+- * Just any arbitrary offset to the start of the vmalloc VM area: the
+- * current 8MB value just means that there will be a 8MB "hole" after the
+- * physical memory until the kernel virtual memory starts.  That means that
+- * any out-of-bounds memory accesses will hopefully be caught.
+- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
+- * area for the same reason. ;)
+- * vmalloc area starts at 4GB to prevent syscall table entry exchanging
+- * from modules.
+- */
+-extern unsigned long vmalloc_end;
+-
+-#ifdef CONFIG_64BIT
+-#define VMALLOC_ADDR	(max(0x100000000UL, (unsigned long) high_memory))
+-#else
+-#define VMALLOC_ADDR	((unsigned long) high_memory)
+-#endif
+-#define VMALLOC_OFFSET	(8*1024*1024)
+-#define VMALLOC_START	((VMALLOC_ADDR + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
+-#define VMALLOC_END	vmalloc_end
+-
+-/*
+- * We need some free virtual space to be able to do vmalloc.
+- * VMALLOC_MIN_SIZE defines the minimum size of the vmalloc
+- * area. On a machine with 2GB memory we make sure that we
+- * have at least 128MB free space for vmalloc. On a machine
+- * with 4TB we make sure we have at least 128GB.
++ * The vmalloc area will always be on the topmost area of the kernel
++ * mapping. We reserve 96MB (31bit) / 1GB (64bit) for vmalloc,
++ * which should be enough for any sane case.
++ * By putting vmalloc at the top, we maximise the gap between physical
++ * memory and vmalloc to catch misplaced memory accesses. As a side
++ * effect, this also makes sure that 64 bit module code cannot be used
++ * as system call address.
+  */
+ #ifndef __s390x__
+-#define VMALLOC_MIN_SIZE	0x8000000UL
+-#define VMALLOC_END_INIT	0x80000000UL
++#define VMALLOC_START	0x78000000UL
++#define VMALLOC_END	0x7e000000UL
++#define VMEM_MAP_MAX	0x80000000UL
+ #else /* __s390x__ */
+-#define VMALLOC_MIN_SIZE	0x2000000000UL
+-#define VMALLOC_END_INIT	0x40000000000UL
++#define VMALLOC_START	0x3e000000000UL
++#define VMALLOC_END	0x3e040000000UL
++#define VMEM_MAP_MAX	0x40000000000UL
+ #endif /* __s390x__ */
+ 
++#define VMEM_MAP	((struct page *) VMALLOC_END)
++#define VMEM_MAP_SIZE	((VMALLOC_START / PAGE_SIZE) * sizeof(struct page))
++
+ /*
+  * A 31 bit pagetable entry of S390 has following format:
+  *  |   PFRA          |    |  OS  |
+diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
+index 21d40a1..c86b982 100644
+--- a/include/asm-s390/processor.h
++++ b/include/asm-s390/processor.h
+@@ -59,9 +59,6 @@ extern void s390_adjust_jiffies(void);
+ extern void print_cpu_info(struct cpuinfo_S390 *);
+ extern int get_cpu_capability(unsigned int *);
+ 
+-/* Lazy FPU handling on uni-processor */
+-extern struct task_struct *last_task_used_math;
+-
+ /*
+  * User space process size: 2GB for 31 bit, 4TB for 64 bit.
+  */
+@@ -95,7 +92,6 @@ struct thread_struct {
+         unsigned long ksp;              /* kernel stack pointer             */
+ 	mm_segment_t mm_segment;
+         unsigned long prot_addr;        /* address of protection-excep.     */
+-        unsigned int error_code;        /* error-code of last prog-excep.   */
+         unsigned int trap_no;
+         per_struct per_info;
+ 	/* Used to give failing instruction back to user for ieee exceptions */
+diff --git a/include/asm-s390/ptrace.h b/include/asm-s390/ptrace.h
+index 332ee73..61f6952 100644
+--- a/include/asm-s390/ptrace.h
++++ b/include/asm-s390/ptrace.h
+@@ -465,6 +465,14 @@ struct user_regs_struct
+ #ifdef __KERNEL__
+ #define __ARCH_SYS_PTRACE	1
+ 
++/*
++ * These are defined as per linux/ptrace.h, which see.
++ */
++#define arch_has_single_step()	(1)
++struct task_struct;
++extern void user_enable_single_step(struct task_struct *);
++extern void user_disable_single_step(struct task_struct *);
++
+ #define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0)
+ #define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN)
+ #define regs_return_value(regs)((regs)->gprs[2])
+diff --git a/include/asm-s390/qdio.h b/include/asm-s390/qdio.h
+index 74db1dc..4b8ff55 100644
+--- a/include/asm-s390/qdio.h
++++ b/include/asm-s390/qdio.h
+@@ -184,7 +184,7 @@ struct qdr {
+ #endif /* QDIO_32_BIT */
+ 	unsigned long qiba;             /* queue-information-block address */
+ 	unsigned int  res8;             /* reserved */
+-	unsigned int  qkey    :  4;     /* queue-informatio-block key */
++	unsigned int  qkey    :  4;	/* queue-information-block key */
+ 	unsigned int  res9    : 28;     /* reserved */
+ /*	union _qd {*/ /* why this? */
+ 		struct qdesfmt0 qdf0[126];
+diff --git a/include/asm-s390/rwsem.h b/include/asm-s390/rwsem.h
+index 90f4ecc..9d2a179 100644
+--- a/include/asm-s390/rwsem.h
++++ b/include/asm-s390/rwsem.h
+@@ -91,8 +91,8 @@ struct rw_semaphore {
+ #endif
+ 
+ #define __RWSEM_INITIALIZER(name) \
+-{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
+-  __RWSEM_DEP_MAP_INIT(name) }
++ { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait.lock), \
++   LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+ 
+ #define DECLARE_RWSEM(name) \
+ 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+diff --git a/include/asm-s390/sclp.h b/include/asm-s390/sclp.h
+index cb9faf1..b5f2843 100644
+--- a/include/asm-s390/sclp.h
++++ b/include/asm-s390/sclp.h
+@@ -27,7 +27,25 @@ struct sclp_ipl_info {
+ 	char loadparm[LOADPARM_LEN];
+ };
+ 
+-void sclp_readinfo_early(void);
++struct sclp_cpu_entry {
++	u8 address;
++	u8 reserved0[13];
++	u8 type;
++	u8 reserved1;
++} __attribute__((packed));
++
++struct sclp_cpu_info {
++	unsigned int configured;
++	unsigned int standby;
++	unsigned int combined;
++	int has_cpu_type;
++	struct sclp_cpu_entry cpu[255];
++};
++
++int sclp_get_cpu_info(struct sclp_cpu_info *info);
++int sclp_cpu_configure(u8 cpu);
++int sclp_cpu_deconfigure(u8 cpu);
++void sclp_read_info_early(void);
+ void sclp_facilities_detect(void);
+ unsigned long long sclp_memory_detect(void);
+ int sclp_sdias_blk_count(void);
+diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
+index 07708c0..c7b7432 100644
+--- a/include/asm-s390/smp.h
++++ b/include/asm-s390/smp.h
+@@ -35,8 +35,6 @@ extern void machine_restart_smp(char *);
+ extern void machine_halt_smp(void);
+ extern void machine_power_off_smp(void);
+ 
+-extern void smp_setup_cpu_possible_map(void);
+-
+ #define NO_PROC_ID		0xFF		/* No processor magic marker */
+ 
+ /*
+@@ -92,6 +90,8 @@ extern void __cpu_die (unsigned int cpu);
+ extern void cpu_die (void) __attribute__ ((noreturn));
+ extern int __cpu_up (unsigned int cpu);
+ 
++extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
++	void *info, int wait);
+ #endif
+ 
+ #ifndef CONFIG_SMP
+@@ -103,7 +103,6 @@ static inline void smp_send_stop(void)
+ 
+ #define hard_smp_processor_id()		0
+ #define smp_cpu_not_running(cpu)	1
+-#define smp_setup_cpu_possible_map()	do { } while (0)
+ #endif
+ 
+ extern union save_area *zfcpdump_save_areas[NR_CPUS + 1];
+diff --git a/include/asm-s390/spinlock.h b/include/asm-s390/spinlock.h
+index 3fd4382..df84ae9 100644
+--- a/include/asm-s390/spinlock.h
++++ b/include/asm-s390/spinlock.h
+@@ -53,44 +53,48 @@ _raw_compare_and_swap(volatile unsigned int *lock,
+  */
+ 
+ #define __raw_spin_is_locked(x) ((x)->owner_cpu != 0)
+-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+ #define __raw_spin_unlock_wait(lock) \
+ 	do { while (__raw_spin_is_locked(lock)) \
+ 		 _raw_spin_relax(lock); } while (0)
+ 
+-extern void _raw_spin_lock_wait(raw_spinlock_t *, unsigned int pc);
+-extern int _raw_spin_trylock_retry(raw_spinlock_t *, unsigned int pc);
++extern void _raw_spin_lock_wait(raw_spinlock_t *);
++extern void _raw_spin_lock_wait_flags(raw_spinlock_t *, unsigned long flags);
++extern int _raw_spin_trylock_retry(raw_spinlock_t *);
+ extern void _raw_spin_relax(raw_spinlock_t *lock);
+ 
+ static inline void __raw_spin_lock(raw_spinlock_t *lp)
+ {
+-	unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
+ 	int old;
+ 
+ 	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
+-	if (likely(old == 0)) {
+-		lp->owner_pc = pc;
++	if (likely(old == 0))
+ 		return;
+-	}
+-	_raw_spin_lock_wait(lp, pc);
++	_raw_spin_lock_wait(lp);
++}
++
++static inline void __raw_spin_lock_flags(raw_spinlock_t *lp,
++					 unsigned long flags)
++{
++	int old;
++
++	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
++	if (likely(old == 0))
++		return;
++	_raw_spin_lock_wait_flags(lp, flags);
+ }
+ 
+ static inline int __raw_spin_trylock(raw_spinlock_t *lp)
+ {
+-	unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
+ 	int old;
+ 
+ 	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
+-	if (likely(old == 0)) {
+-		lp->owner_pc = pc;
++	if (likely(old == 0))
+ 		return 1;
+-	}
+-	return _raw_spin_trylock_retry(lp, pc);
++	return _raw_spin_trylock_retry(lp);
+ }
+ 
+ static inline void __raw_spin_unlock(raw_spinlock_t *lp)
+ {
+-	lp->owner_pc = 0;
+ 	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
+ }
+ 		
+diff --git a/include/asm-s390/spinlock_types.h b/include/asm-s390/spinlock_types.h
+index b7ac13f..654abc4 100644
+--- a/include/asm-s390/spinlock_types.h
++++ b/include/asm-s390/spinlock_types.h
+@@ -7,7 +7,6 @@
+ 
+ typedef struct {
+ 	volatile unsigned int owner_cpu;
+-	volatile unsigned int owner_pc;
+ } __attribute__ ((aligned (4))) raw_spinlock_t;
+ 
+ #define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
+diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h
+index a69bd24..70fa5ae 100644
+--- a/include/asm-s390/tlbflush.h
++++ b/include/asm-s390/tlbflush.h
+@@ -42,11 +42,11 @@ static inline void __tlb_flush_global(void)
+ /*
+  * Flush all tlb entries of a page table on all cpus.
+  */
+-static inline void __tlb_flush_idte(pgd_t *pgd)
++static inline void __tlb_flush_idte(unsigned long asce)
+ {
+ 	asm volatile(
+ 		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
+-		: : "a" (2048), "a" (__pa(pgd) & PAGE_MASK) : "cc" );
++		: : "a" (2048), "a" (asce) : "cc" );
+ }
+ 
+ static inline void __tlb_flush_mm(struct mm_struct * mm)
+@@ -61,11 +61,11 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
+ 	 * only ran on the local cpu.
+ 	 */
+ 	if (MACHINE_HAS_IDTE) {
+-		pgd_t *shadow_pgd = get_shadow_table(mm->pgd);
++		pgd_t *shadow = get_shadow_table(mm->pgd);
+ 
+-		if (shadow_pgd)
+-			__tlb_flush_idte(shadow_pgd);
+-		__tlb_flush_idte(mm->pgd);
++		if (shadow)
++			__tlb_flush_idte((unsigned long) shadow | mm->context);
++		__tlb_flush_idte((unsigned long) mm->pgd | mm->context);
+ 		return;
+ 	}
+ 	preempt_disable();
+@@ -106,9 +106,23 @@ static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
+  */
+ #define flush_tlb()				do { } while (0)
+ #define flush_tlb_all()				do { } while (0)
+-#define flush_tlb_mm(mm)			__tlb_flush_mm_cond(mm)
+ #define flush_tlb_page(vma, addr)		do { } while (0)
+-#define flush_tlb_range(vma, start, end)	__tlb_flush_mm_cond(mm)
+-#define flush_tlb_kernel_range(start, end)	__tlb_flush_mm(&init_mm)
++
++static inline void flush_tlb_mm(struct mm_struct *mm)
++{
++	__tlb_flush_mm_cond(mm);
++}
++
++static inline void flush_tlb_range(struct vm_area_struct *vma,
++				   unsigned long start, unsigned long end)
++{
++	__tlb_flush_mm_cond(vma->vm_mm);
++}
++
++static inline void flush_tlb_kernel_range(unsigned long start,
++					  unsigned long end)
++{
++	__tlb_flush_mm(&init_mm);
++}
+ 
+ #endif /* _S390_TLBFLUSH_H */
+diff --git a/include/asm-s390/zcrypt.h b/include/asm-s390/zcrypt.h
+index a5dada6..f228f1b 100644
+--- a/include/asm-s390/zcrypt.h
++++ b/include/asm-s390/zcrypt.h
+@@ -117,7 +117,7 @@ struct CPRBX {
+ 	unsigned char	padx004[16 - sizeof (char *)];
+ 	unsigned char *	req_extb;	/* request extension block 'addr'*/
+ 	unsigned char	padx005[16 - sizeof (char *)];
+-	unsigned char *	rpl_extb;	/* reply extension block 'addres'*/
++	unsigned char *	rpl_extb;	/* reply extension block 'address'*/
+ 	unsigned short	ccp_rtcode;	/* server return code		 */
+ 	unsigned short	ccp_rscode;	/* server reason code		 */
+ 	unsigned int	mac_data_len;	/* Mac Data Length		 */
 diff --git a/include/asm-x86/thread_info_32.h b/include/asm-x86/thread_info_32.h
 index 22a8cbc..ef58fd2 100644
 --- a/include/asm-x86/thread_info_32.h
@@ -171269,18 +192428,19 @@
  	int (*match)(struct attribute_container *, struct device *);
  #define	ATTRIBUTE_CONTAINER_NO_CLASSDEVS	0x01
 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
-index d18ee67..bd20a4e 100644
+index d18ee67..49b7a4c 100644
 --- a/include/linux/blkdev.h
 +++ b/include/linux/blkdev.h
-@@ -144,7 +144,6 @@ enum rq_cmd_type_bits {
+@@ -143,8 +143,6 @@ enum rq_cmd_type_bits {
+ 	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
  	 * private REQ_LB opcodes to differentiate what type of request this is
  	 */
- 	REQ_TYPE_ATA_CMD,
+-	REQ_TYPE_ATA_CMD,
 -	REQ_TYPE_ATA_TASK,
  	REQ_TYPE_ATA_TASKFILE,
  	REQ_TYPE_ATA_PC,
  };
-@@ -766,6 +765,7 @@ extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
+@@ -766,6 +764,7 @@ extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
  extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
  extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
  extern void blk_queue_dma_alignment(struct request_queue *, int);
@@ -172983,7 +194143,7 @@
  #define I2C_DRIVERID_I2CDEV	900
  #define I2C_DRIVERID_ARP        902    /* SMBus ARP Client              */
 diff --git a/include/linux/ide.h b/include/linux/ide.h
-index 9a6a41e..1e44099 100644
+index 9a6a41e..27cb39d 100644
 --- a/include/linux/ide.h
 +++ b/include/linux/ide.h
 @@ -27,25 +27,10 @@
@@ -173036,7 +194196,28 @@
  #define IDE_DATA_REG		(HWIF(drive)->io_ports[IDE_DATA_OFFSET])
  #define IDE_ERROR_REG		(HWIF(drive)->io_ports[IDE_ERROR_OFFSET])
  #define IDE_NSECTOR_REG		(HWIF(drive)->io_ports[IDE_NSECTOR_OFFSET])
-@@ -327,47 +306,16 @@ static inline void ide_init_hwif_ports(hw_regs_t *hw,
+@@ -128,7 +107,6 @@ typedef unsigned char	byte;	/* used everywhere */
+ #define BAD_W_STAT		(BAD_R_STAT  | WRERR_STAT)
+ #define BAD_STAT		(BAD_R_STAT  | DRQ_STAT)
+ #define DRIVE_READY		(READY_STAT  | SEEK_STAT)
+-#define DATA_READY		(DRQ_STAT)
+ 
+ #define BAD_CRC			(ABRT_ERR    | ICRC_ERR)
+ 
+@@ -219,8 +197,11 @@ typedef struct hw_regs_s {
+ } hw_regs_t;
+ 
+ struct hwif_s * ide_find_port(unsigned long);
++void ide_init_port_data(struct hwif_s *, unsigned int);
++void ide_init_port_hw(struct hwif_s *, hw_regs_t *);
+ 
+-int ide_register_hw(hw_regs_t *, void (*)(struct hwif_s *), int,
++struct ide_drive_s;
++int ide_register_hw(hw_regs_t *, void (*)(struct ide_drive_s *),
+ 		    struct hwif_s **);
+ 
+ void ide_setup_ports(	hw_regs_t *hw,
+@@ -327,47 +308,16 @@ static inline void ide_init_hwif_ports(hw_regs_t *hw,
  typedef union {
  	unsigned all			: 8;
  	struct {
@@ -173084,7 +194265,7 @@
   * ATA-IDE Select Register, aka Device-Head
   *
   * head		: always zeros here
-@@ -398,131 +346,6 @@ typedef union {
+@@ -398,131 +348,6 @@ typedef union {
  } select_t, ata_select_t;
  
  /*
@@ -173216,7 +194397,15 @@
   * Status returned from various ide_ functions
   */
  typedef enum {
-@@ -701,8 +524,6 @@ typedef struct hwif_s {
+@@ -568,7 +393,6 @@ typedef struct ide_drive_s {
+ 	u8	state;			/* retry state */
+ 	u8	waiting_for_dma;	/* dma currently in progress */
+ 	u8	unmask;			/* okay to unmask other irqs */
+-	u8	bswap;			/* byte swap data */
+ 	u8	noflush;		/* don't attempt flushes */
+ 	u8	dsc_overlap;		/* DSC overlap */
+ 	u8	nice1;			/* give potential excess bandwidth */
+@@ -701,36 +525,29 @@ typedef struct hwif_s {
  	void	(*pre_reset)(ide_drive_t *);
  	/* routine to reset controller after a disk reset */
  	void	(*resetproc)(ide_drive_t *);
@@ -173225,7 +194414,37 @@
  	/* special host masking for drive selection */
  	void	(*maskproc)(ide_drive_t *, int);
  	/* check host's drive quirk list */
-@@ -766,7 +587,6 @@ typedef struct hwif_s {
+-	int	(*quirkproc)(ide_drive_t *);
++	void	(*quirkproc)(ide_drive_t *);
+ 	/* driver soft-power interface */
+ 	int	(*busproc)(ide_drive_t *, int);
+ #endif
+ 	u8 (*mdma_filter)(ide_drive_t *);
+ 	u8 (*udma_filter)(ide_drive_t *);
+ 
+-	void (*fixup)(struct hwif_s *);
+-
+ 	void (*ata_input_data)(ide_drive_t *, void *, u32);
+ 	void (*ata_output_data)(ide_drive_t *, void *, u32);
+ 
+ 	void (*atapi_input_bytes)(ide_drive_t *, void *, u32);
+ 	void (*atapi_output_bytes)(ide_drive_t *, void *, u32);
+ 
++	void (*dma_host_set)(ide_drive_t *, int);
+ 	int (*dma_setup)(ide_drive_t *);
+ 	void (*dma_exec_cmd)(ide_drive_t *, u8);
+ 	void (*dma_start)(ide_drive_t *);
+ 	int (*ide_dma_end)(ide_drive_t *drive);
+-	int (*ide_dma_on)(ide_drive_t *drive);
+-	void (*dma_off_quietly)(ide_drive_t *drive);
+ 	int (*ide_dma_test_irq)(ide_drive_t *drive);
+ 	void (*ide_dma_clear_irq)(ide_drive_t *drive);
+-	void (*dma_host_on)(ide_drive_t *drive);
+-	void (*dma_host_off)(ide_drive_t *drive);
+ 	void (*dma_lost_irq)(ide_drive_t *drive);
+ 	void (*dma_timeout)(ide_drive_t *drive);
+ 
+@@ -766,7 +583,6 @@ typedef struct hwif_s {
  	int		rqsize;		/* max sectors per request */
  	int		irq;		/* our irq number */
  
@@ -173233,7 +194452,7 @@
  	unsigned long	dma_base;	/* base addr for dma ports */
  	unsigned long	dma_command;	/* dma command register */
  	unsigned long	dma_vendor1;	/* dma vendor 1 register */
-@@ -806,7 +626,6 @@ typedef struct hwif_s {
+@@ -806,7 +622,6 @@ typedef struct hwif_s {
  /*
   *  internal ide interrupt handler type
   */
@@ -173241,7 +194460,7 @@
  typedef ide_startstop_t (ide_handler_t)(ide_drive_t *);
  typedef int (ide_expiry_t)(ide_drive_t *);
  
-@@ -1020,7 +839,8 @@ int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
+@@ -1020,7 +835,8 @@ int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
  
  extern void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler, unsigned int timeout, ide_expiry_t *expiry);
  
@@ -173251,10 +194470,17 @@
  
  ide_startstop_t __ide_error(ide_drive_t *, struct request *, u8, u8);
  
-@@ -1062,52 +882,114 @@ extern void ide_end_drive_cmd(ide_drive_t *, u8, u8);
-  */
- extern int ide_wait_cmd(ide_drive_t *, u8, u8, u8, u8, u8 *);
+@@ -1054,60 +870,126 @@ extern int ide_do_drive_cmd(ide_drive_t *, struct request *, ide_action_t);
  
+ extern void ide_end_drive_cmd(ide_drive_t *, u8, u8);
+ 
+-/*
+- * Issue ATA command and wait for completion.
+- * Use for implementing commands in kernel
+- *
+- *  (ide_drive_t *drive, u8 cmd, u8 nsect, u8 feature, u8 sectors, u8 *buf)
+- */
+-extern int ide_wait_cmd(ide_drive_t *, u8, u8, u8, u8, u8 *);
 +enum {
 +	IDE_TFLAG_LBA48			= (1 << 0),
 +	IDE_TFLAG_NO_SELECT_MASK	= (1 << 1),
@@ -173307,6 +194533,14 @@
 +	IDE_TFLAG_IN_TF			= IDE_TFLAG_IN_NSECT |
 +					  IDE_TFLAG_IN_LBA,
 +	IDE_TFLAG_IN_DEVICE		= (1 << 29),
++	IDE_TFLAG_HOB			= IDE_TFLAG_OUT_HOB |
++					  IDE_TFLAG_IN_HOB,
++	IDE_TFLAG_TF			= IDE_TFLAG_OUT_TF |
++					  IDE_TFLAG_IN_TF,
++	IDE_TFLAG_DEVICE		= IDE_TFLAG_OUT_DEVICE |
++					  IDE_TFLAG_IN_DEVICE,
++	/* force 16-bit I/O operations */
++	IDE_TFLAG_IO_16BIT		= (1 << 30),
 +};
 +
 +struct ide_taskfile {
@@ -173337,7 +194571,7 @@
 +		u8 command;	/* write: command */
 +	};
 +};
-+
+ 
  typedef struct ide_task_s {
 -/*
 - *	struct hd_drive_task_hdr	tf;
@@ -173377,12 +194611,15 @@
 - * taskfile io for disks for now...and builds request from ide_ioctl
 - */
 -extern ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *);
--
++void ide_pktcmd_tf_load(ide_drive_t *, u32, u16, u8);
+ 
 -/*
 - * Special Flagged Register Validation Caller
 - */
 -extern ide_startstop_t flagged_taskfile(ide_drive_t *, ide_task_t *);
-+void ide_pktcmd_tf_load(ide_drive_t *, u32, u16, u8);
++ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *);
++
++void task_end_request(ide_drive_t *, struct request *, u8);
  
 -extern ide_startstop_t set_multmode_intr(ide_drive_t *);
 -extern ide_startstop_t set_geometry_intr(ide_drive_t *);
@@ -173390,7 +194627,7 @@
 -extern ide_startstop_t task_no_data_intr(ide_drive_t *);
 -extern ide_startstop_t task_in_intr(ide_drive_t *);
 -extern ide_startstop_t pre_task_out_intr(ide_drive_t *, struct request *);
-+ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *);
++u8 wait_drive_not_busy(ide_drive_t *);
  
 -extern int ide_raw_taskfile(ide_drive_t *, ide_task_t *, u8 *);
 +int ide_raw_taskfile(ide_drive_t *, ide_task_t *, u8 *, u16);
@@ -173398,16 +194635,34 @@
  
  int ide_taskfile_ioctl(ide_drive_t *, unsigned int, unsigned long);
  int ide_cmd_ioctl(ide_drive_t *, unsigned int, unsigned long);
-@@ -1212,6 +1094,7 @@ enum {
+@@ -1133,10 +1015,9 @@ extern void do_ide_request(struct request_queue *);
+ 
+ void ide_init_disk(struct gendisk *, ide_drive_t *);
+ 
+-extern int ideprobe_init(void);
+-
+ #ifdef CONFIG_IDEPCI_PCIBUS_ORDER
+-extern void ide_scan_pcibus(int scan_direction) __init;
++extern int ide_scan_direction;
++int __init ide_scan_pcibus(void);
+ extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *owner, const char *mod_name);
+ #define ide_pci_register_driver(d) __ide_pci_register_driver(d, THIS_MODULE, KBUILD_MODNAME)
+ #else
+@@ -1212,6 +1093,9 @@ enum {
  	IDE_HFLAG_IO_32BIT		= (1 << 24),
  	/* unmask IRQs */
  	IDE_HFLAG_UNMASK_IRQS		= (1 << 25),
 +	IDE_HFLAG_ABUSE_SET_DMA_MODE	= (1 << 26),
++	/* host is CY82C693 */
++	IDE_HFLAG_CY82C693		= (1 << 27),
  };
  
  #ifdef CONFIG_BLK_DEV_OFFBOARD
-@@ -1229,7 +1112,7 @@ struct ide_port_info {
- 	void			(*fixup)(ide_hwif_t *);
+@@ -1226,10 +1110,9 @@ struct ide_port_info {
+ 	void			(*init_iops)(ide_hwif_t *);
+ 	void                    (*init_hwif)(ide_hwif_t *);
+ 	void			(*init_dma)(ide_hwif_t *, unsigned long);
+-	void			(*fixup)(ide_hwif_t *);
  	ide_pci_enablebit_t	enablebits[2];
  	hwif_chipset_t		chipset;
 -	unsigned int		extra;
@@ -173415,7 +194670,50 @@
  	u32			host_flags;
  	u8			pio_mask;
  	u8			swdma_mask;
-@@ -1356,6 +1239,7 @@ static inline int ide_dev_is_sata(struct hd_driveid *id)
+@@ -1264,7 +1147,9 @@ static inline u8 ide_max_dma_mode(ide_drive_t *drive)
+ 	return ide_find_dma_mode(drive, XFER_UDMA_6);
+ }
+ 
++void ide_dma_off_quietly(ide_drive_t *);
+ void ide_dma_off(ide_drive_t *);
++void ide_dma_on(ide_drive_t *);
+ int ide_set_dma(ide_drive_t *);
+ ide_startstop_t ide_dma_intr(ide_drive_t *);
+ 
+@@ -1275,10 +1160,7 @@ extern void ide_destroy_dmatable(ide_drive_t *);
+ extern int ide_release_dma(ide_hwif_t *);
+ extern void ide_setup_dma(ide_hwif_t *, unsigned long, unsigned int);
+ 
+-void ide_dma_host_off(ide_drive_t *);
+-void ide_dma_off_quietly(ide_drive_t *);
+-void ide_dma_host_on(ide_drive_t *);
+-extern int __ide_dma_on(ide_drive_t *);
++void ide_dma_host_set(ide_drive_t *, int);
+ extern int ide_dma_setup(ide_drive_t *);
+ extern void ide_dma_start(ide_drive_t *);
+ extern int __ide_dma_end(ide_drive_t *);
+@@ -1290,7 +1172,9 @@ extern void ide_dma_timeout(ide_drive_t *);
+ static inline int ide_id_dma_bug(ide_drive_t *drive) { return 0; }
+ static inline u8 ide_find_dma_mode(ide_drive_t *drive, u8 speed) { return 0; }
+ static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; }
++static inline void ide_dma_off_quietly(ide_drive_t *drive) { ; }
+ static inline void ide_dma_off(ide_drive_t *drive) { ; }
++static inline void ide_dma_on(ide_drive_t *drive) { ; }
+ static inline void ide_dma_verbose(ide_drive_t *drive) { ; }
+ static inline int ide_set_dma(ide_drive_t *drive) { return 1; }
+ #endif /* CONFIG_BLK_DEV_IDEDMA */
+@@ -1320,8 +1204,9 @@ extern void ide_unregister (unsigned int index);
+ void ide_register_region(struct gendisk *);
+ void ide_unregister_region(struct gendisk *);
+ 
+-void ide_undecoded_slave(ide_hwif_t *);
++void ide_undecoded_slave(ide_drive_t *);
+ 
++int ide_device_add_all(u8 *idx);
+ int ide_device_add(u8 idx[4]);
+ 
+ static inline void *ide_get_hwifdata (ide_hwif_t * hwif)
+@@ -1356,6 +1241,7 @@ static inline int ide_dev_is_sata(struct hd_driveid *id)
  	return 0;
  }
  
@@ -173423,6 +194721,16 @@
  u8 ide_dump_status(ide_drive_t *, const char *, u8);
  
  typedef struct ide_pio_timings_s {
+@@ -1418,4 +1304,9 @@ static inline ide_drive_t *ide_get_paired_drive(ide_drive_t *drive)
+ 	return &hwif->drives[(drive->dn ^ 1) & 1];
+ }
+ 
++static inline void ide_set_irq(ide_drive_t *drive, int on)
++{
++	drive->hwif->OUTB(drive->ctl | (on ? 0 : 2), IDE_CONTROL_REG);
++}
++
+ #endif /* _IDE_H */
 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
 index cae35b6..796019b 100644
 --- a/include/linux/init_task.h
@@ -178827,7 +200135,7 @@
  extern unsigned long image_size;
  extern int in_suspend;
 diff --git a/kernel/printk.c b/kernel/printk.c
-index 89011bf..423a8c7 100644
+index 89011bf..3b7c968 100644
 --- a/kernel/printk.c
 +++ b/kernel/printk.c
 @@ -573,11 +573,6 @@ static int __init printk_time_setup(char *str)
@@ -178910,18 +200218,16 @@
  
  	/*
  	 * Copy the output into log_buf.  If the caller didn't provide
-@@ -680,7 +702,9 @@ asmlinkage int vprintk(const char *fmt, va_list args)
+@@ -680,7 +702,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
  					loglev_char = default_message_loglevel
  						+ '0';
  				}
 -				t = printk_clock();
-+				t = 0;
-+				if (system_state != SYSTEM_BOOTING)
-+					t = ktime_to_ns(ktime_get());
++				t = cpu_clock(printk_cpu);
  				nanosec_rem = do_div(t, 1000000000);
  				tlen = sprintf(tbuf,
  						"<%c>[%5lu.%06lu] ",
-@@ -744,6 +768,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
+@@ -744,6 +766,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
  		printk_cpu = UINT_MAX;
  		spin_unlock(&logbuf_lock);
  		lockdep_on();
@@ -186237,6 +207543,17 @@
  #endif
  #ifdef CONFIG_COMPAT
  	{
+diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
+index a68425a..d8a5558 100644
+--- a/kernel/sysctl_check.c
++++ b/kernel/sysctl_check.c
+@@ -1,6 +1,5 @@
+ #include <linux/stat.h>
+ #include <linux/sysctl.h>
+-#include "../arch/s390/appldata/appldata.h"
+ #include "../fs/xfs/linux-2.6/xfs_sysctl.h"
+ #include <linux/sunrpc/debug.h>
+ #include <linux/string.h>
 diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
 index c8a9d13..8d6125a 100644
 --- a/kernel/time/clocksource.c
@@ -190038,3 +211355,17 @@
  	default:
  		return -EINVAL;
  	}
+diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
+index f83b19d..4bf715d 100644
+--- a/security/selinux/ss/services.c
++++ b/security/selinux/ss/services.c
+@@ -1744,6 +1744,9 @@ int security_genfs_sid(const char *fstype,
+ 	struct ocontext *c;
+ 	int rc = 0, cmp = 0;
+ 
++	while (path[0] == '/' && path[1] == '/')
++		path++;
++
+ 	POLICY_RDLOCK;
+ 
+ 	for (genfs = policydb.genfs; genfs; genfs = genfs->next) {

Modified: dists/trunk/linux-2.6/debian/patches/series/1~experimental.1
==============================================================================
--- dists/trunk/linux-2.6/debian/patches/series/1~experimental.1	(original)
+++ dists/trunk/linux-2.6/debian/patches/series/1~experimental.1	Sun Jan 27 21:43:29 2008
@@ -41,4 +41,4 @@
 + bugfix/all/fw-sbp2-incr-login-orb-reply-timeout.patch
 + bugfix/all/fw-sbp2-skip-unnecessary-logout.patch
 + bugfix/all/fw-sbp2-try-increase-reconnect_hold.patch
-+ bugfix/all/patch-2.6.24-git2
++ bugfix/all/patch-2.6.24-git3



More information about the Kernel-svn-changes mailing list